/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/traps.h>

#include "ptrace.h"
#include "signal.h"

static const char *handler[] = { "prefetch abort", "data abort", "address exception", "interrupt" };

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
	get_option(&str, &user_debug);
	return 1;
}
__setup("user_debug=", user_debug_setup);
#endif

static void dump_mem(const char *str, unsigned long bottom, unsigned long top);

void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
	printk("[<%08lx>] ", where);
	print_symbol("(%s) ", where);
	printk("from [<%08lx>] ", from);
	print_symbol("(%s)\n", from);
#else
	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

	if (in_exception_text(where))
		dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}

/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET ||
	    (sp > (unsigned long)high_memory && high_memory != NULL))
		return -EFAULT;

	return 0;
}

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long p = bottom & ~31;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	for (p = bottom & ~31; p < top;) {
		printk("%04lx: ", p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				printk("         ");
			else {
				__get_user(val, (unsigned long *)p);
				printk("%08x ", val);
			}
		}
		printk("\n");
	}

	set_fs(fs);
}
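
/*
 * For reference, dump_mem() prints one row per 32 bytes, keyed by the low
 * 16 bits of the address, e.g. (the values here are purely illustrative):
 *
 *	Stack: (0xc1825f20 to 0xc1826000)
 *	5f20: c0056f84 c1825f4c 00000000 60000013 ...
 *
 * Words outside the requested range are padded with blanks so that the
 * columns stay aligned.
 */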

static void dump_instr(struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("Code: ");
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		if (thumb)
			bad = __get_user(val, &((u16 *)addr)[i]);
		else
			bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
		else {
			printk("bad PC value.");
			break;
		}
	}
	printk("\n");

	set_fs(fs);
}

static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp;
	int ok = 1;

	printk("Backtrace: ");
	fp = regs->ARM_fp;
	if (!fp) {
		printk("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		printk("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		printk("frame pointer underflow");
	printk("\n");

	if (ok)
		c_backtrace(fp, processor_mode(regs));
}

void dump_stack(void)
{
	__backtrace();
}

EXPORT_SYMBOL(dump_stack);

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long fp;

	if (!tsk)
		tsk = current;

	if (tsk != current)
		fp = thread_saved_fp(tsk);
	else
		asm("mov %0, fp" : "=r" (fp) : : "cc");

	c_backtrace(fp, 0x10);
	barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif

static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
	struct task_struct *tsk = thread->task;
	static int die_counter;

	printk("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
	       str, err, ++die_counter);
	print_modules();
	__show_regs(regs);
	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
		tsk->comm, task_pid_nr(tsk), thread + 1);

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem("Stack: ", regs->ARM_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(regs);
	}
}
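
/*
 * The report produced by __die() is emitted in a fixed order: the
 * "Internal error" banner with the error code and die counter, the list
 * of loaded modules, the register dump, the owning process, and - for
 * faults taken in kernel mode or in interrupt context - the raw kernel
 * stack, the backtrace and the code bytes around the faulting PC.
 */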

DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();

	oops_enter();

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	__die(str, err, thread, regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
		struct siginfo *info, unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_info(info->si_signo, info, current);
	} else {
		die(str, regs, err);
	}
}

static LIST_HEAD(undef_hook);
static DEFINE_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
	struct undef_hook *hook;
	unsigned long flags;
	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

	spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
			fn = hook->fn;
	spin_unlock_irqrestore(&undef_lock, flags);

	return fn ? fn(regs, instr) : 1;
}
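
/*
 * Sketch of how an undefined-instruction hook is wired up.  The names
 * my_handler/my_hook are made up for illustration; the mask/value pair
 * shown is the one arm_mrc_hook later in this file actually uses for
 * TLS emulation:
 *
 *	static int my_handler(struct pt_regs *regs, unsigned int instr)
 *	{
 *		... emulate instr, advance regs->ARM_pc ...
 *		return 0;			(0 = handled, non-zero = decline)
 *	}
 *
 *	static struct undef_hook my_hook = {
 *		.instr_mask	= 0x0fff0fff,	(which bits must match)
 *		.instr_val	= 0x0e1d0f70,	(required encoding)
 *		.cpsr_mask	= PSR_T_BIT,	(match ARM vs Thumb state)
 *		.cpsr_val	= 0,
 *		.fn		= my_handler,
 *	};
 *
 *	register_undef_hook(&my_hook);
 *
 * call_undef_hook() returns the matching hook's return value, or 1 if no
 * hook claims the instruction, in which case do_undefinstr() falls
 * through to delivering SIGILL.
 */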

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	unsigned int correction = thumb_mode(regs) ? 2 : 4;
	unsigned int instr;
	siginfo_t info;
	void __user *pc;

	/*
	 * According to the ARM ARM, PC is 2 or 4 bytes ahead,
	 * depending on whether we're in Thumb mode or not.
	 * Correct this offset.
	 */
	regs->ARM_pc -= correction;

	pc = (void __user *)instruction_pointer(regs);

	if (processor_mode(regs) == SVC_MODE) {
		instr = *(u32 *) pc;
	} else if (thumb_mode(regs)) {
		get_user(instr, (u16 __user *)pc);
	} else {
		get_user(instr, (u32 __user *)pc);
	}

	if (call_undef_hook(regs, instr) == 0)
		return;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
			current->comm, task_pid_nr(current), pc);
		dump_instr(regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = pc;

	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}

asmlinkage void do_unexp_fiq(struct pt_regs *regs)
{
	printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
	printk("You may have a hardware problem...\n");
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	console_verbose();

	printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
	local_irq_disable();
	panic("bad mode");
}

static int bad_syscall(int n, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if (current->personality != PER_LINUX &&
	    current->personality != PER_LINUX_32BIT &&
	    thread->exec_domain->handler) {
		thread->exec_domain->handler(n, regs);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
			task_pid_nr(current), current->comm, n);
		dump_instr(regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)instruction_pointer(regs) -
			(thumb_mode(regs) ? 2 : 4);

	arm_notify_die("Oops - bad syscall", regs, &info, n, 0);

	return regs->ARM_r0;
}

static inline void
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	struct vm_area_struct *vma;

	if (end < start || flags)
		return;

	vma = find_vma(current->active_mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			start = vma->vm_start;
		if (end > vma->vm_end)
			end = vma->vm_end;

		flush_cache_user_range(vma, start, end);
	}
}
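
/*
 * A note on how the private ARM calls handled below are reached from
 * user space (the numbering itself lives in asm/unistd.h, not here):
 * with the old ABI the call number is encoded in the swi immediate,
 * roughly
 *
 *	mov	r0, ...			@ arguments as for a normal call
 *	mov	r1, ...
 *	mov	r2, #0
 *	swi	0x9f0002		@ __ARM_NR_cacheflush, for example
 *
 * while with EABI the number goes in r7 and a plain "swi 0" is issued.
 * The snippet is an illustration only; consult asm/unistd.h and the C
 * library for the authoritative encodings.
 */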

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if ((no >> 16) != (__ARM_NR_BASE >> 16))
		return bad_syscall(no, regs);

	switch (no & 0xffff) {
	case 0: /* branch through 0 */
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = NULL;

		arm_notify_die("branch through zero", regs, &info, 0, 0);
		return 0;

	case NR(breakpoint): /* SWI BREAK_POINT */
		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
		ptrace_break(current, regs);
		return regs->ARM_r0;

	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_.  There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
	 * is defined to be something else.  For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache.  Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case NR(cacheflush):
		do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
		return 0;

	case NR(usr26):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr &= ~MODE32_BIT;
		return regs->ARM_r0;

	case NR(usr32):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr |= MODE32_BIT;
		return regs->ARM_r0;

	case NR(set_tls):
		thread->tp_value = regs->ARM_r0;
#if defined(CONFIG_HAS_TLS_REG)
		asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
#elif !defined(CONFIG_TLS_REG_EMUL)
		/*
		 * User space must never try to access this directly.
		 * Expect your app to break eventually if you do so.
		 * The user helper at 0xffff0fe0 must be used instead.
		 * (see entry-armv.S for details)
		 */
		*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
#endif
		return 0;

#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
	/*
	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
	 * happened.  Also set the user C flag accordingly.
	 * If access permissions have to be fixed up then non-zero is
	 * returned and the operation has to be re-attempted.
	 *
	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
	 * existence.  Don't ever use this from user code.
	 */
	case 0xfff0:
	for (;;) {
		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
					 struct pt_regs *regs);
		unsigned long val;
		unsigned long addr = regs->ARM_r2;
		struct mm_struct *mm = current->mm;
		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
		spinlock_t *ptl;

		regs->ARM_cpsr &= ~PSR_C_BIT;
		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, addr);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}
		val = *(unsigned long *)addr;
		val -= regs->ARM_r0;
		if (val == 0) {
			*(unsigned long *)addr = regs->ARM_r1;
			regs->ARM_cpsr |= PSR_C_BIT;
		}
		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return val;

		bad_access:
		up_read(&mm->mmap_sem);
		/* simulate a write access fault */
		do_DataAbort(addr, 15 + (1 << 11), regs);
	}
#endif

	default:
		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
		   if not implemented, rather than raising SIGILL.  This
		   way the calling program can gracefully determine whether
		   a feature is supported.  */
		if (no <= 0x7ff)
			return -ENOSYS;
		break;
	}
#ifdef CONFIG_DEBUG_USER
	/*
	 * experience shows that these seem to indicate that
	 * something catastrophic has happened
	 */
	if (user_debug & UDBG_SYSCALL) {
		printk("[%d] %s: arm syscall %d\n",
		       task_pid_nr(current), current->comm, no);
		dump_instr(regs);
		if (user_mode(regs)) {
			__show_regs(regs);
			c_backtrace(regs->ARM_fp, processor_mode(regs));
		}
	}
#endif
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)instruction_pointer(regs) -
			(thumb_mode(regs) ? 2 : 4);

	arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
	return 0;
}
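
/*
 * The 0xffff0fe0 helper referred to in the set_tls case above is one of
 * the __kuser_ entry points that early_trap_init() below copies to the
 * top of the vector page.  User space reaches it simply by calling the
 * fixed address; a minimal, purely illustrative C wrapper would be:
 *
 *	typedef unsigned int (*kuser_get_tls_t)(void);
 *	#define kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)
 *
 *	unsigned int tls = kuser_get_tls();
 *
 * The actual helper code lives in entry-armv.S, not in this file.
 */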

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
	int reg = (instr >> 12) & 15;
	if (reg == 15)
		return 1;
	regs->uregs[reg] = current_thread_info()->tp_value;
	regs->ARM_pc += 4;
	return 0;
}

static struct undef_hook arm_mrc_hook = {
	.instr_mask	= 0x0fff0fff,
	.instr_val	= 0x0e1d0f70,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
	register_undef_hook(&arm_mrc_hook);
	return 0;
}

late_initcall(arm_mrc_hook_init);

#endif
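
/*
 * For reference, the instr_mask/instr_val pair in arm_mrc_hook decodes
 * any conditional "mrc p15, 0, rN, c13, c0, 3" (a read of the user
 * read-only thread register) regardless of the destination register,
 * and the PSR_T_BIT mask restricts the hook to ARM (non-Thumb) state.
 */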

void __bad_xchg(volatile void *ptr, int size)
{
	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
		__builtin_return_address(0), ptr, size);
	BUG();
}
EXPORT_SYMBOL(__bad_xchg);

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	siginfo_t info;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_BADABORT) {
		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
			task_pid_nr(current), current->comm, code, instr);
		dump_instr(regs);
		show_pte(current->mm, addr);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)addr;

	arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}

void __attribute__((noreturn)) __bug(const char *file, int line)
{
	printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
	*(int *)0 = 0;

	/* Avoid "noreturn function does return" */
	for (;;);
}
EXPORT_SYMBOL(__bug);

void __readwrite_bug(const char *fn)
{
	printk("%s called, but not implemented\n", fn);
	BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pgd %08lx.\n", file, line, val);
}

asmlinkage void __div0(void)
{
	printk("Division by zero in kernel.\n");
	dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

void __init trap_init(void)
{
	return;
}

void __init early_trap_init(void)
{
	unsigned long vectors = CONFIG_VECTORS_BASE;
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/*
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
	       sizeof(sigreturn_codes));

	flush_icache_range(vectors, vectors + PAGE_SIZE);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
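
/*
 * Resulting layout of the vector page assembled by early_trap_init()
 * above (the base is CONFIG_VECTORS_BASE, 0xffff0000 in the usual
 * high-vectors configuration):
 *
 *	+0x0000			exception vectors (__vectors_start..__vectors_end)
 *	+0x0200			vector stubs      (__stubs_start..__stubs_end)
 *	+0x1000 - kuser_sz	kuser helpers     (__kuser_helper_start..__kuser_helper_end)
 *
 * plus the signal return trampolines copied to KERN_SIGRETURN_CODE.
 */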