/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

int show_unhandled_signals = 1;

/* At boot time we determine this value, necessary for setting
 * up the segment maps and page table entries (pte's).
 */
int num_contexts;

/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

static void unhandled_fault(unsigned long, struct task_struct *,
			    struct pt_regs *) __attribute__ ((noreturn));

static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
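/* lookup_fault() is the recovery path taken when a fault hits one of
 * the assembler user-copy routines (it is asmlinkage; ret_pc identifies
 * the faulting call site in the exception table).  The class returned
 * by search_extables_range() is, as the fixup comments below indicate:
 * 3 if both loads and stores have a fixup, 1 if only stores do (the
 * "_to_" user-copy macros), 2 if only loads do (the "_from_" macros).
 * For classes 1 and 2 the faulting instruction word is decoded: in the
 * SPARC load/store encoding, insn bit 21 (bit 2 of the op3 field) is
 * set for store-class instructions, and op3 0x0f is SWAP, which loads
 * as well as stores.  Anything else ends in unhandled_fault().
 */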
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)	/* store-class op3 */
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		/* op3 0x0f is SWAP, which performs a load as well */
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

/* Work out the address to report in siginfo: %pc itself for text
 * faults, otherwise the effective address of the load/store at %pc.
 */
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
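/* Main MMU fault handler, entered from the low-level trap code
 * (asmlinkage) with the decoded fault type: text_fault is set for
 * instruction-access faults, in which case the faulting address is
 * %pc itself; write is set for stores.  Kernel faults on vmalloc
 * addresses are repaired locklessly by copying pgd/pmd entries from
 * init_mm; everything else goes through handle_mm_fault(), retrying
 * at most once if mmap_sem was dropped while sleeping on a locked
 * page.
 */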
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (write ? FAULT_FLAG_WRITE : 0));

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}
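/* The three handlers below pre-fault the user stack pages needed to
 * spill or fill a register window.  A window save area is the 16
 * window registers, 0x40 bytes starting at %sp; the spill/fill code
 * apparently accesses it as doublewords, so the last access begins at
 * %sp + 0x38.  When %sp and %sp + 0x38 land on different pages the
 * save area straddles a page boundary and both pages must be faulted
 * in: writably for overflow (spilling to the stack), readably for
 * underflow and window returns (filling from it).  A stack pointer
 * that is not doubleword-aligned earns a SIGILL via
 * check_stack_aligned().
 */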
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}