xref: /linux/arch/xtensa/mm/fault.c (revision d91517839e5d95adc0cf4b28caa7af62a71de526)
// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

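/* Per-CPU ASID state used by the MMU context code (asm/mmu_context.h);
 * each CPU starts out at the first user ASID.
 */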
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

#undef DEBUG_PAGE_FAULT

/*
 * This routine handles page faults.  It determines the faulting address
 * and the kind of access that failed, then dispatches to the appropriate
 * handler.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	siginfo_t info;

	int is_write, is_exec;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	info.si_code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

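	/* Classify the exception: a store cache-attribute exception is a write
	 * fault; an ITLB miss, ITLB privilege, or fetch cache-attribute
	 * exception is an instruction fetch.  Everything else is treated as a
	 * read.
	 */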
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

#ifdef DEBUG_PAGE_FAULT
	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
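	/* Take mmap_sem for reading and look up the VMA covering the fault
	 * address.  handle_mm_fault() may ask us to retry, in which case we
	 * come back here with FAULT_FLAG_TRIED set.
	 */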
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
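	/* The address is below the VMA that find_vma() returned; only a
	 * downward-growing stack VMA may be expanded to cover it.
	 */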
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;

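	/* The mapping exists, so any failure from here on is a protection
	 * problem: check the attempted access against the VMA's permissions.
	 */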
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

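	/* If handle_mm_fault() asked for a retry and a fatal signal is
	 * pending, there is nothing left to do for this task; mmap_sem has
	 * already been released on the VM_FAULT_RETRY path.
	 */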
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;


	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

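		/* If even the reference page table has no entry for this
		 * address, the vmalloc area really is unmapped.  Otherwise
		 * copy the pgd (and, below, the pmd) entry into the active mm
		 * so the lower levels are shared with init_mm.
		 */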
		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}


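/*
 * Called for faults the handler above cannot resolve: if the faulting
 * instruction has an exception-table fixup, branch to it; otherwise print
 * an oops and kill the task.
 */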
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
				current->comm, regs->pc, entry->fixup);
#endif
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}
259