1 /* 2 * linux/arch/m68k/kernel/traps.c 3 * 4 * Copyright (C) 1993, 1994 by Hamish Macdonald 5 * 6 * 68040 fixes by Michael Rausch 7 * 68040 fixes by Martin Apel 8 * 68040 fixes and writeback by Richard Zidlicky 9 * 68060 fixes by Roman Hodek 10 * 68060 fixes by Jesper Skov 11 * 12 * This file is subject to the terms and conditions of the GNU General Public 13 * License. See the file COPYING in the main directory of this archive 14 * for more details. 15 */ 16 17 /* 18 * Sets up all exception vectors 19 */ 20 21 #include <linux/sched.h> 22 #include <linux/sched/debug.h> 23 #include <linux/signal.h> 24 #include <linux/kernel.h> 25 #include <linux/mm.h> 26 #include <linux/module.h> 27 #include <linux/user.h> 28 #include <linux/string.h> 29 #include <linux/linkage.h> 30 #include <linux/init.h> 31 #include <linux/ptrace.h> 32 #include <linux/kallsyms.h> 33 #include <linux/extable.h> 34 35 #include <asm/setup.h> 36 #include <asm/fpu.h> 37 #include <linux/uaccess.h> 38 #include <asm/traps.h> 39 #include <asm/machdep.h> 40 #include <asm/processor.h> 41 #include <asm/siginfo.h> 42 #include <asm/tlbflush.h> 43 44 static const char *vec_names[] = { 45 [VEC_RESETSP] = "RESET SP", 46 [VEC_RESETPC] = "RESET PC", 47 [VEC_BUSERR] = "BUS ERROR", 48 [VEC_ADDRERR] = "ADDRESS ERROR", 49 [VEC_ILLEGAL] = "ILLEGAL INSTRUCTION", 50 [VEC_ZERODIV] = "ZERO DIVIDE", 51 [VEC_CHK] = "CHK", 52 [VEC_TRAP] = "TRAPcc", 53 [VEC_PRIV] = "PRIVILEGE VIOLATION", 54 [VEC_TRACE] = "TRACE", 55 [VEC_LINE10] = "LINE 1010", 56 [VEC_LINE11] = "LINE 1111", 57 [VEC_RESV12] = "UNASSIGNED RESERVED 12", 58 [VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION", 59 [VEC_FORMAT] = "FORMAT ERROR", 60 [VEC_UNINT] = "UNINITIALIZED INTERRUPT", 61 [VEC_RESV16] = "UNASSIGNED RESERVED 16", 62 [VEC_RESV17] = "UNASSIGNED RESERVED 17", 63 [VEC_RESV18] = "UNASSIGNED RESERVED 18", 64 [VEC_RESV19] = "UNASSIGNED RESERVED 19", 65 [VEC_RESV20] = "UNASSIGNED RESERVED 20", 66 [VEC_RESV21] = "UNASSIGNED RESERVED 21", 67 [VEC_RESV22] = "UNASSIGNED RESERVED 22", 68 [VEC_RESV23] = "UNASSIGNED RESERVED 23", 69 [VEC_SPUR] = "SPURIOUS INTERRUPT", 70 [VEC_INT1] = "LEVEL 1 INT", 71 [VEC_INT2] = "LEVEL 2 INT", 72 [VEC_INT3] = "LEVEL 3 INT", 73 [VEC_INT4] = "LEVEL 4 INT", 74 [VEC_INT5] = "LEVEL 5 INT", 75 [VEC_INT6] = "LEVEL 6 INT", 76 [VEC_INT7] = "LEVEL 7 INT", 77 [VEC_SYS] = "SYSCALL", 78 [VEC_TRAP1] = "TRAP #1", 79 [VEC_TRAP2] = "TRAP #2", 80 [VEC_TRAP3] = "TRAP #3", 81 [VEC_TRAP4] = "TRAP #4", 82 [VEC_TRAP5] = "TRAP #5", 83 [VEC_TRAP6] = "TRAP #6", 84 [VEC_TRAP7] = "TRAP #7", 85 [VEC_TRAP8] = "TRAP #8", 86 [VEC_TRAP9] = "TRAP #9", 87 [VEC_TRAP10] = "TRAP #10", 88 [VEC_TRAP11] = "TRAP #11", 89 [VEC_TRAP12] = "TRAP #12", 90 [VEC_TRAP13] = "TRAP #13", 91 [VEC_TRAP14] = "TRAP #14", 92 [VEC_TRAP15] = "TRAP #15", 93 [VEC_FPBRUC] = "FPCP BSUN", 94 [VEC_FPIR] = "FPCP INEXACT", 95 [VEC_FPDIVZ] = "FPCP DIV BY 0", 96 [VEC_FPUNDER] = "FPCP UNDERFLOW", 97 [VEC_FPOE] = "FPCP OPERAND ERROR", 98 [VEC_FPOVER] = "FPCP OVERFLOW", 99 [VEC_FPNAN] = "FPCP SNAN", 100 [VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION", 101 [VEC_MMUCFG] = "MMU CONFIGURATION ERROR", 102 [VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR", 103 [VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR", 104 [VEC_RESV59] = "UNASSIGNED RESERVED 59", 105 [VEC_UNIMPEA] = "UNASSIGNED RESERVED 60", 106 [VEC_UNIMPII] = "UNASSIGNED RESERVED 61", 107 [VEC_RESV62] = "UNASSIGNED RESERVED 62", 108 [VEC_RESV63] = "UNASSIGNED RESERVED 63", 109 }; 110 111 static const char *space_names[] = { 112 [0] = "Space 0", 113 [USER_DATA] = "User Data", 114 [USER_PROGRAM] = "User Program", 115 #ifndef CONFIG_SUN3 116 [3] = "Space 3", 117 #else 118 [FC_CONTROL] = "Control", 119 #endif 120 [4] = "Space 4", 121 [SUPER_DATA] = "Super Data", 122 [SUPER_PROGRAM] = "Super Program", 123 [CPU_SPACE] = "CPU" 124 }; 125 126 void die_if_kernel(char *,struct pt_regs *,int); 127 asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, 128 unsigned long error_code); 129 int send_fault_sig(struct pt_regs *regs); 130 131 asmlinkage void trap_c(struct frame *fp); 132 133 #if defined (CONFIG_M68060) 134 static inline void access_error060 (struct frame *fp) 135 { 136 unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */ 137 138 pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); 139 140 if (fslw & MMU060_BPE) { 141 /* branch prediction error -> clear branch cache */ 142 __asm__ __volatile__ ("movec %/cacr,%/d0\n\t" 143 "orl #0x00400000,%/d0\n\t" 144 "movec %/d0,%/cacr" 145 : : : "d0" ); 146 /* return if there's no other error */ 147 if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE)) 148 return; 149 } 150 151 if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) { 152 unsigned long errorcode; 153 unsigned long addr = fp->un.fmt4.effaddr; 154 155 if (fslw & MMU060_MA) 156 addr = (addr + PAGE_SIZE - 1) & PAGE_MASK; 157 158 errorcode = 1; 159 if (fslw & MMU060_DESC_ERR) { 160 __flush_tlb040_one(addr); 161 errorcode = 0; 162 } 163 if (fslw & MMU060_W) 164 errorcode |= 2; 165 pr_debug("errorcode = %ld\n", errorcode); 166 do_page_fault(&fp->ptregs, addr, errorcode); 167 } else if (fslw & (MMU060_SEE)){ 168 /* Software Emulation Error. 169 * fault during mem_read/mem_write in ifpsp060/os.S 170 */ 171 send_fault_sig(&fp->ptregs); 172 } else if (!(fslw & (MMU060_RE|MMU060_WE)) || 173 send_fault_sig(&fp->ptregs) > 0) { 174 pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, 175 fp->un.fmt4.effaddr); 176 pr_err("68060 access error, fslw=%lx\n", fslw); 177 trap_c( fp ); 178 } 179 } 180 #endif /* CONFIG_M68060 */ 181 182 #if defined (CONFIG_M68040) 183 static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs) 184 { 185 unsigned long mmusr; 186 187 set_fc(wbs); 188 189 if (iswrite) 190 asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr)); 191 else 192 asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr)); 193 194 asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr)); 195 196 set_fc(USER_DATA); 197 198 return mmusr; 199 } 200 201 static inline int do_040writeback1(unsigned short wbs, unsigned long wba, 202 unsigned long wbd) 203 { 204 int res = 0; 205 206 set_fc(wbs); 207 208 switch (wbs & WBSIZ_040) { 209 case BA_SIZE_BYTE: 210 res = put_user(wbd & 0xff, (char __user *)wba); 211 break; 212 case BA_SIZE_WORD: 213 res = put_user(wbd & 0xffff, (short __user *)wba); 214 break; 215 case BA_SIZE_LONG: 216 res = put_user(wbd, (int __user *)wba); 217 break; 218 } 219 220 set_fc(USER_DATA); 221 222 pr_debug("do_040writeback1, res=%d\n", res); 223 224 return res; 225 } 226 227 /* after an exception in a writeback the stack frame corresponding 228 * to that exception is discarded, set a few bits in the old frame 229 * to simulate what it should look like 230 */ 231 static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs) 232 { 233 fp->un.fmt7.faddr = wba; 234 fp->un.fmt7.ssw = wbs & 0xff; 235 if (wba != current->thread.faddr) 236 fp->un.fmt7.ssw |= MA_040; 237 } 238 239 static inline void do_040writebacks(struct frame *fp) 240 { 241 int res = 0; 242 #if 0 243 if (fp->un.fmt7.wb1s & WBV_040) 244 pr_err("access_error040: cannot handle 1st writeback. oops.\n"); 245 #endif 246 247 if ((fp->un.fmt7.wb2s & WBV_040) && 248 !(fp->un.fmt7.wb2s & WBTT_040)) { 249 res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, 250 fp->un.fmt7.wb2d); 251 if (res) 252 fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s); 253 else 254 fp->un.fmt7.wb2s = 0; 255 } 256 257 /* do the 2nd wb only if the first one was successful (except for a kernel wb) */ 258 if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) { 259 res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, 260 fp->un.fmt7.wb3d); 261 if (res) 262 { 263 fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s); 264 265 fp->un.fmt7.wb2s = fp->un.fmt7.wb3s; 266 fp->un.fmt7.wb3s &= (~WBV_040); 267 fp->un.fmt7.wb2a = fp->un.fmt7.wb3a; 268 fp->un.fmt7.wb2d = fp->un.fmt7.wb3d; 269 } 270 else 271 fp->un.fmt7.wb3s = 0; 272 } 273 274 if (res) 275 send_fault_sig(&fp->ptregs); 276 } 277 278 /* 279 * called from sigreturn(), must ensure userspace code didn't 280 * manipulate exception frame to circumvent protection, then complete 281 * pending writebacks 282 * we just clear TM2 to turn it into a userspace access 283 */ 284 asmlinkage void berr_040cleanup(struct frame *fp) 285 { 286 fp->un.fmt7.wb2s &= ~4; 287 fp->un.fmt7.wb3s &= ~4; 288 289 do_040writebacks(fp); 290 } 291 292 static inline void access_error040(struct frame *fp) 293 { 294 unsigned short ssw = fp->un.fmt7.ssw; 295 unsigned long mmusr; 296 297 pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); 298 pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, 299 fp->un.fmt7.wb2s, fp->un.fmt7.wb3s); 300 pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", 301 fp->un.fmt7.wb2a, fp->un.fmt7.wb3a, 302 fp->un.fmt7.wb2d, fp->un.fmt7.wb3d); 303 304 if (ssw & ATC_040) { 305 unsigned long addr = fp->un.fmt7.faddr; 306 unsigned long errorcode; 307 308 /* 309 * The MMU status has to be determined AFTER the address 310 * has been corrected if there was a misaligned access (MA). 311 */ 312 if (ssw & MA_040) 313 addr = (addr + 7) & -8; 314 315 /* MMU error, get the MMUSR info for this access */ 316 mmusr = probe040(!(ssw & RW_040), addr, ssw); 317 pr_debug("mmusr = %lx\n", mmusr); 318 errorcode = 1; 319 if (!(mmusr & MMU_R_040)) { 320 /* clear the invalid atc entry */ 321 __flush_tlb040_one(addr); 322 errorcode = 0; 323 } 324 325 /* despite what documentation seems to say, RMW 326 * accesses have always both the LK and RW bits set */ 327 if (!(ssw & RW_040) || (ssw & LK_040)) 328 errorcode |= 2; 329 330 if (do_page_fault(&fp->ptregs, addr, errorcode)) { 331 pr_debug("do_page_fault() !=0\n"); 332 if (user_mode(&fp->ptregs)){ 333 /* delay writebacks after signal delivery */ 334 pr_debug(".. was usermode - return\n"); 335 return; 336 } 337 /* disable writeback into user space from kernel 338 * (if do_page_fault didn't fix the mapping, 339 * the writeback won't do good) 340 */ 341 disable_wb: 342 pr_debug(".. disabling wb2\n"); 343 if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr) 344 fp->un.fmt7.wb2s &= ~WBV_040; 345 if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr) 346 fp->un.fmt7.wb3s &= ~WBV_040; 347 } 348 } else { 349 /* In case of a bus error we either kill the process or expect 350 * the kernel to catch the fault, which then is also responsible 351 * for cleaning up the mess. 352 */ 353 current->thread.signo = SIGBUS; 354 current->thread.faddr = fp->un.fmt7.faddr; 355 if (send_fault_sig(&fp->ptregs) >= 0) 356 pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, 357 fp->un.fmt7.faddr); 358 goto disable_wb; 359 } 360 361 do_040writebacks(fp); 362 } 363 #endif /* CONFIG_M68040 */ 364 365 #if defined(CONFIG_SUN3) 366 #include <asm/sun3mmu.h> 367 368 extern int mmu_emu_handle_fault (unsigned long, int, int); 369 370 /* sun3 version of bus_error030 */ 371 372 static inline void bus_error030 (struct frame *fp) 373 { 374 unsigned char buserr_type = sun3_get_buserr (); 375 unsigned long addr, errorcode; 376 unsigned short ssw = fp->un.fmtb.ssw; 377 extern unsigned long _sun3_map_test_start, _sun3_map_test_end; 378 379 if (ssw & (FC | FB)) 380 pr_debug("Instruction fault at %#010lx\n", 381 ssw & FC ? 382 fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 383 : 384 fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); 385 if (ssw & DF) 386 pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", 387 ssw & RW ? "read" : "write", 388 fp->un.fmtb.daddr, 389 space_names[ssw & DFC], fp->ptregs.pc); 390 391 /* 392 * Check if this page should be demand-mapped. This needs to go before 393 * the testing for a bad kernel-space access (demand-mapping applies 394 * to kernel accesses too). 395 */ 396 397 if ((ssw & DF) 398 && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) { 399 if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0)) 400 return; 401 } 402 403 /* Check for kernel-space pagefault (BAD). */ 404 if (fp->ptregs.sr & PS_S) { 405 /* kernel fault must be a data fault to user space */ 406 if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) { 407 // try checking the kernel mappings before surrender 408 if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1)) 409 return; 410 /* instruction fault or kernel data fault! */ 411 if (ssw & (FC | FB)) 412 pr_err("Instruction fault at %#010lx\n", 413 fp->ptregs.pc); 414 if (ssw & DF) { 415 /* was this fault incurred testing bus mappings? */ 416 if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) && 417 (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) { 418 send_fault_sig(&fp->ptregs); 419 return; 420 } 421 422 pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", 423 ssw & RW ? "read" : "write", 424 fp->un.fmtb.daddr, 425 space_names[ssw & DFC], fp->ptregs.pc); 426 } 427 pr_err("BAD KERNEL BUSERR\n"); 428 429 die_if_kernel("Oops", &fp->ptregs,0); 430 force_sig(SIGKILL); 431 return; 432 } 433 } else { 434 /* user fault */ 435 if (!(ssw & (FC | FB)) && !(ssw & DF)) 436 /* not an instruction fault or data fault! BAD */ 437 panic ("USER BUSERR w/o instruction or data fault"); 438 } 439 440 441 /* First handle the data fault, if any. */ 442 if (ssw & DF) { 443 addr = fp->un.fmtb.daddr; 444 445 // errorcode bit 0: 0 -> no page 1 -> protection fault 446 // errorcode bit 1: 0 -> read fault 1 -> write fault 447 448 // (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault 449 // (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault 450 451 if (buserr_type & SUN3_BUSERR_PROTERR) 452 errorcode = 0x01; 453 else if (buserr_type & SUN3_BUSERR_INVALID) 454 errorcode = 0x00; 455 else { 456 pr_debug("*** unexpected busfault type=%#04x\n", 457 buserr_type); 458 pr_debug("invalid %s access at %#lx from pc %#lx\n", 459 !(ssw & RW) ? "write" : "read", addr, 460 fp->ptregs.pc); 461 die_if_kernel ("Oops", &fp->ptregs, buserr_type); 462 force_sig (SIGBUS); 463 return; 464 } 465 466 //todo: wtf is RM bit? --m 467 if (!(ssw & RW) || ssw & RM) 468 errorcode |= 0x02; 469 470 /* Handle page fault. */ 471 do_page_fault (&fp->ptregs, addr, errorcode); 472 473 /* Retry the data fault now. */ 474 return; 475 } 476 477 /* Now handle the instruction fault. */ 478 479 /* Get the fault address. */ 480 if (fp->ptregs.format == 0xA) 481 addr = fp->ptregs.pc + 4; 482 else 483 addr = fp->un.fmtb.baddr; 484 if (ssw & FC) 485 addr -= 2; 486 487 if (buserr_type & SUN3_BUSERR_INVALID) { 488 if (!mmu_emu_handle_fault(addr, 1, 0)) 489 do_page_fault (&fp->ptregs, addr, 0); 490 } else { 491 pr_debug("protection fault on insn access (segv).\n"); 492 force_sig (SIGSEGV); 493 } 494 } 495 #else 496 #if defined(CPU_M68020_OR_M68030) 497 static inline void bus_error030 (struct frame *fp) 498 { 499 volatile unsigned short temp; 500 unsigned short mmusr; 501 unsigned long addr, errorcode; 502 unsigned short ssw = fp->un.fmtb.ssw; 503 #ifdef DEBUG 504 unsigned long desc; 505 #endif 506 507 pr_debug("pid = %x ", current->pid); 508 pr_debug("SSW=%#06x ", ssw); 509 510 if (ssw & (FC | FB)) 511 pr_debug("Instruction fault at %#010lx\n", 512 ssw & FC ? 513 fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 514 : 515 fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); 516 if (ssw & DF) 517 pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", 518 ssw & RW ? "read" : "write", 519 fp->un.fmtb.daddr, 520 space_names[ssw & DFC], fp->ptregs.pc); 521 522 /* ++andreas: If a data fault and an instruction fault happen 523 at the same time map in both pages. */ 524 525 /* First handle the data fault, if any. */ 526 if (ssw & DF) { 527 addr = fp->un.fmtb.daddr; 528 529 #ifdef DEBUG 530 asm volatile ("ptestr %3,%2@,#7,%0\n\t" 531 "pmove %%psr,%1" 532 : "=a&" (desc), "=m" (temp) 533 : "a" (addr), "d" (ssw)); 534 pr_debug("mmusr is %#x for addr %#lx in task %p\n", 535 temp, addr, current); 536 pr_debug("descriptor address is 0x%p, contents %#lx\n", 537 __va(desc), *(unsigned long *)__va(desc)); 538 #else 539 asm volatile ("ptestr %2,%1@,#7\n\t" 540 "pmove %%psr,%0" 541 : "=m" (temp) : "a" (addr), "d" (ssw)); 542 #endif 543 mmusr = temp; 544 errorcode = (mmusr & MMU_I) ? 0 : 1; 545 if (!(ssw & RW) || (ssw & RM)) 546 errorcode |= 2; 547 548 if (mmusr & (MMU_I | MMU_WP)) { 549 /* We might have an exception table for this PC */ 550 if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) { 551 pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", 552 ssw & RW ? "read" : "write", 553 fp->un.fmtb.daddr, 554 space_names[ssw & DFC], fp->ptregs.pc); 555 goto buserr; 556 } 557 /* Don't try to do anything further if an exception was 558 handled. */ 559 if (do_page_fault (&fp->ptregs, addr, errorcode) < 0) 560 return; 561 } else if (!(mmusr & MMU_I)) { 562 /* probably a 020 cas fault */ 563 if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0) 564 pr_err("unexpected bus error (%#x,%#x)\n", ssw, 565 mmusr); 566 } else if (mmusr & (MMU_B|MMU_L|MMU_S)) { 567 pr_err("invalid %s access at %#lx from pc %#lx\n", 568 !(ssw & RW) ? "write" : "read", addr, 569 fp->ptregs.pc); 570 die_if_kernel("Oops",&fp->ptregs,mmusr); 571 force_sig(SIGSEGV); 572 return; 573 } else { 574 #if 0 575 static volatile long tlong; 576 #endif 577 578 pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", 579 !(ssw & RW) ? "write" : "read", addr, 580 fp->ptregs.pc, ssw); 581 asm volatile ("ptestr #1,%1@,#0\n\t" 582 "pmove %%psr,%0" 583 : "=m" (temp) 584 : "a" (addr)); 585 mmusr = temp; 586 587 pr_err("level 0 mmusr is %#x\n", mmusr); 588 #if 0 589 asm volatile ("pmove %%tt0,%0" 590 : "=m" (tlong)); 591 pr_debug("tt0 is %#lx, ", tlong); 592 asm volatile ("pmove %%tt1,%0" 593 : "=m" (tlong)); 594 pr_debug("tt1 is %#lx\n", tlong); 595 #endif 596 pr_debug("Unknown SIGSEGV - 1\n"); 597 die_if_kernel("Oops",&fp->ptregs,mmusr); 598 force_sig(SIGSEGV); 599 return; 600 } 601 602 /* setup an ATC entry for the access about to be retried */ 603 if (!(ssw & RW) || (ssw & RM)) 604 asm volatile ("ploadw %1,%0@" : /* no outputs */ 605 : "a" (addr), "d" (ssw)); 606 else 607 asm volatile ("ploadr %1,%0@" : /* no outputs */ 608 : "a" (addr), "d" (ssw)); 609 } 610 611 /* Now handle the instruction fault. */ 612 613 if (!(ssw & (FC|FB))) 614 return; 615 616 if (fp->ptregs.sr & PS_S) { 617 pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); 618 buserr: 619 pr_err("BAD KERNEL BUSERR\n"); 620 die_if_kernel("Oops",&fp->ptregs,0); 621 force_sig(SIGKILL); 622 return; 623 } 624 625 /* get the fault address */ 626 if (fp->ptregs.format == 10) 627 addr = fp->ptregs.pc + 4; 628 else 629 addr = fp->un.fmtb.baddr; 630 if (ssw & FC) 631 addr -= 2; 632 633 if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0) 634 /* Insn fault on same page as data fault. But we 635 should still create the ATC entry. */ 636 goto create_atc_entry; 637 638 #ifdef DEBUG 639 asm volatile ("ptestr #1,%2@,#7,%0\n\t" 640 "pmove %%psr,%1" 641 : "=a&" (desc), "=m" (temp) 642 : "a" (addr)); 643 pr_debug("mmusr is %#x for addr %#lx in task %p\n", 644 temp, addr, current); 645 pr_debug("descriptor address is 0x%p, contents %#lx\n", 646 __va(desc), *(unsigned long *)__va(desc)); 647 #else 648 asm volatile ("ptestr #1,%1@,#7\n\t" 649 "pmove %%psr,%0" 650 : "=m" (temp) : "a" (addr)); 651 #endif 652 mmusr = temp; 653 if (mmusr & MMU_I) 654 do_page_fault (&fp->ptregs, addr, 0); 655 else if (mmusr & (MMU_B|MMU_L|MMU_S)) { 656 pr_err("invalid insn access at %#lx from pc %#lx\n", 657 addr, fp->ptregs.pc); 658 pr_debug("Unknown SIGSEGV - 2\n"); 659 die_if_kernel("Oops",&fp->ptregs,mmusr); 660 force_sig(SIGSEGV); 661 return; 662 } 663 664 create_atc_entry: 665 /* setup an ATC entry for the access about to be retried */ 666 asm volatile ("ploadr #2,%0@" : /* no outputs */ 667 : "a" (addr)); 668 } 669 #endif /* CPU_M68020_OR_M68030 */ 670 #endif /* !CONFIG_SUN3 */ 671 672 #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) 673 #include <asm/mcfmmu.h> 674 675 /* 676 * The following table converts the FS encoding of a ColdFire 677 * exception stack frame into the error_code value needed by 678 * do_fault. 679 */ 680 static const unsigned char fs_err_code[] = { 681 0, /* 0000 */ 682 0, /* 0001 */ 683 0, /* 0010 */ 684 0, /* 0011 */ 685 1, /* 0100 */ 686 0, /* 0101 */ 687 0, /* 0110 */ 688 0, /* 0111 */ 689 2, /* 1000 */ 690 3, /* 1001 */ 691 2, /* 1010 */ 692 0, /* 1011 */ 693 1, /* 1100 */ 694 1, /* 1101 */ 695 0, /* 1110 */ 696 0 /* 1111 */ 697 }; 698 699 static inline void access_errorcf(unsigned int fs, struct frame *fp) 700 { 701 unsigned long mmusr, addr; 702 unsigned int err_code; 703 int need_page_fault; 704 705 mmusr = mmu_read(MMUSR); 706 addr = mmu_read(MMUAR); 707 708 /* 709 * error_code: 710 * bit 0 == 0 means no page found, 1 means protection fault 711 * bit 1 == 0 means read, 1 means write 712 */ 713 switch (fs) { 714 case 5: /* 0101 TLB opword X miss */ 715 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0); 716 addr = fp->ptregs.pc; 717 break; 718 case 6: /* 0110 TLB extension word X miss */ 719 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1); 720 addr = fp->ptregs.pc + sizeof(long); 721 break; 722 case 10: /* 1010 TLB W miss */ 723 need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0); 724 break; 725 case 14: /* 1110 TLB R miss */ 726 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0); 727 break; 728 default: 729 /* 0000 Normal */ 730 /* 0001 Reserved */ 731 /* 0010 Interrupt during debug service routine */ 732 /* 0011 Reserved */ 733 /* 0100 X Protection */ 734 /* 0111 IFP in emulator mode */ 735 /* 1000 W Protection*/ 736 /* 1001 Write error*/ 737 /* 1011 Reserved*/ 738 /* 1100 R Protection*/ 739 /* 1101 R Protection*/ 740 /* 1111 OEP in emulator mode*/ 741 need_page_fault = 1; 742 break; 743 } 744 745 if (need_page_fault) { 746 err_code = fs_err_code[fs]; 747 if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */ 748 err_code |= 2; /* bit1 - write, bit0 - protection */ 749 do_page_fault(&fp->ptregs, addr, err_code); 750 } 751 } 752 #endif /* CONFIG_COLDFIRE CONFIG_MMU */ 753 754 asmlinkage void buserr_c(struct frame *fp) 755 { 756 /* Only set esp0 if coming from user mode */ 757 if (user_mode(&fp->ptregs)) 758 current->thread.esp0 = (unsigned long) fp; 759 760 pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format); 761 762 #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) 763 if (CPU_IS_COLDFIRE) { 764 unsigned int fs; 765 fs = (fp->ptregs.vector & 0x3) | 766 ((fp->ptregs.vector & 0xc00) >> 8); 767 switch (fs) { 768 case 0x5: 769 case 0x6: 770 case 0x7: 771 case 0x9: 772 case 0xa: 773 case 0xd: 774 case 0xe: 775 case 0xf: 776 access_errorcf(fs, fp); 777 return; 778 default: 779 break; 780 } 781 } 782 #endif /* CONFIG_COLDFIRE && CONFIG_MMU */ 783 784 switch (fp->ptregs.format) { 785 #if defined (CONFIG_M68060) 786 case 4: /* 68060 access error */ 787 access_error060 (fp); 788 break; 789 #endif 790 #if defined (CONFIG_M68040) 791 case 0x7: /* 68040 access error */ 792 access_error040 (fp); 793 break; 794 #endif 795 #if defined (CPU_M68020_OR_M68030) 796 case 0xa: 797 case 0xb: 798 bus_error030 (fp); 799 break; 800 #endif 801 default: 802 die_if_kernel("bad frame format",&fp->ptregs,0); 803 pr_debug("Unknown SIGSEGV - 4\n"); 804 force_sig(SIGSEGV); 805 } 806 } 807 808 809 static int kstack_depth_to_print = 48; 810 811 static void show_trace(unsigned long *stack, const char *loglvl) 812 { 813 unsigned long *endstack; 814 unsigned long addr; 815 int i; 816 817 printk("%sCall Trace:", loglvl); 818 addr = (unsigned long)stack + THREAD_SIZE - 1; 819 endstack = (unsigned long *)(addr & -THREAD_SIZE); 820 i = 0; 821 while (stack + 1 <= endstack) { 822 addr = *stack++; 823 /* 824 * If the address is either in the text segment of the 825 * kernel, or in the region which contains vmalloc'ed 826 * memory, it *may* be the address of a calling 827 * routine; if so, print it so that someone tracing 828 * down the cause of the crash will be able to figure 829 * out the call path that was taken. 830 */ 831 if (__kernel_text_address(addr)) { 832 #ifndef CONFIG_KALLSYMS 833 if (i % 5 == 0) 834 pr_cont("\n "); 835 #endif 836 pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr); 837 i++; 838 } 839 } 840 pr_cont("\n"); 841 } 842 843 void show_registers(struct pt_regs *regs) 844 { 845 struct frame *fp = (struct frame *)regs; 846 u16 c, *cp; 847 unsigned long addr; 848 int i; 849 850 print_modules(); 851 pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); 852 pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); 853 pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", 854 regs->d0, regs->d1, regs->d2, regs->d3); 855 pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", 856 regs->d4, regs->d5, regs->a0, regs->a1); 857 858 pr_info("Process %s (pid: %d, task=%p)\n", 859 current->comm, task_pid_nr(current), current); 860 addr = (unsigned long)&fp->un; 861 pr_info("Frame format=%X ", regs->format); 862 switch (regs->format) { 863 case 0x2: 864 pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr); 865 addr += sizeof(fp->un.fmt2); 866 break; 867 case 0x3: 868 pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr); 869 addr += sizeof(fp->un.fmt3); 870 break; 871 case 0x4: 872 if (CPU_IS_060) 873 pr_cont("fault addr=%08lx fslw=%08lx\n", 874 fp->un.fmt4.effaddr, fp->un.fmt4.pc); 875 else 876 pr_cont("eff addr=%08lx pc=%08lx\n", 877 fp->un.fmt4.effaddr, fp->un.fmt4.pc); 878 addr += sizeof(fp->un.fmt4); 879 break; 880 case 0x7: 881 pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n", 882 fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr); 883 pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n", 884 fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0); 885 pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n", 886 fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d); 887 pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n", 888 fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d); 889 pr_info("push data: %08lx %08lx %08lx %08lx\n", 890 fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2, 891 fp->un.fmt7.pd3); 892 addr += sizeof(fp->un.fmt7); 893 break; 894 case 0x9: 895 pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr); 896 addr += sizeof(fp->un.fmt9); 897 break; 898 case 0xa: 899 pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", 900 fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb, 901 fp->un.fmta.daddr, fp->un.fmta.dobuf); 902 addr += sizeof(fp->un.fmta); 903 break; 904 case 0xb: 905 pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", 906 fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb, 907 fp->un.fmtb.daddr, fp->un.fmtb.dobuf); 908 pr_info("baddr=%08lx dibuf=%08lx ver=%x\n", 909 fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver); 910 addr += sizeof(fp->un.fmtb); 911 break; 912 default: 913 pr_cont("\n"); 914 } 915 show_stack(NULL, (unsigned long *)addr, KERN_INFO); 916 917 pr_info("Code:"); 918 cp = (u16 *)regs->pc; 919 for (i = -8; i < 16; i++) { 920 if (get_kernel_nofault(c, cp + i) && i >= 0) { 921 pr_cont(" Bad PC value."); 922 break; 923 } 924 if (i) 925 pr_cont(" %04x", c); 926 else 927 pr_cont(" <%04x>", c); 928 } 929 pr_cont("\n"); 930 } 931 932 void show_stack(struct task_struct *task, unsigned long *stack, 933 const char *loglvl) 934 { 935 unsigned long *p; 936 unsigned long *endstack; 937 int i; 938 939 if (!stack) { 940 if (task) 941 stack = (unsigned long *)task->thread.esp0; 942 else 943 stack = (unsigned long *)&stack; 944 } 945 endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); 946 947 printk("%sStack from %08lx:", loglvl, (unsigned long)stack); 948 p = stack; 949 for (i = 0; i < kstack_depth_to_print; i++) { 950 if (p + 1 > endstack) 951 break; 952 if (i % 8 == 0) 953 pr_cont("\n "); 954 pr_cont(" %08lx", *p++); 955 } 956 pr_cont("\n"); 957 show_trace(stack, loglvl); 958 } 959 960 /* 961 * The vector number returned in the frame pointer may also contain 962 * the "fs" (Fault Status) bits on ColdFire. These are in the bottom 963 * 2 bits, and upper 2 bits. So we need to mask out the real vector 964 * number before using it in comparisons. You don't need to do this on 965 * real 68k parts, but it won't hurt either. 966 */ 967 968 void bad_super_trap (struct frame *fp) 969 { 970 int vector = (fp->ptregs.vector >> 2) & 0xff; 971 972 console_verbose(); 973 if (vector < ARRAY_SIZE(vec_names)) 974 pr_err("*** %s *** FORMAT=%X\n", 975 vec_names[vector], 976 fp->ptregs.format); 977 else 978 pr_err("*** Exception %d *** FORMAT=%X\n", 979 vector, fp->ptregs.format); 980 if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) { 981 unsigned short ssw = fp->un.fmtb.ssw; 982 983 pr_err("SSW=%#06x ", ssw); 984 985 if (ssw & RC) 986 pr_err("Pipe stage C instruction fault at %#010lx\n", 987 (fp->ptregs.format) == 0xA ? 988 fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2); 989 if (ssw & RB) 990 pr_err("Pipe stage B instruction fault at %#010lx\n", 991 (fp->ptregs.format) == 0xA ? 992 fp->ptregs.pc + 4 : fp->un.fmtb.baddr); 993 if (ssw & DF) 994 pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", 995 ssw & RW ? "read" : "write", 996 fp->un.fmtb.daddr, space_names[ssw & DFC], 997 fp->ptregs.pc); 998 } 999 pr_err("Current process id is %d\n", task_pid_nr(current)); 1000 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); 1001 } 1002 1003 asmlinkage void trap_c(struct frame *fp) 1004 { 1005 int sig, si_code; 1006 void __user *addr; 1007 int vector = (fp->ptregs.vector >> 2) & 0xff; 1008 1009 if (fp->ptregs.sr & PS_S) { 1010 if (vector == VEC_TRACE) { 1011 /* traced a trapping instruction on a 68020/30, 1012 * real exception will be executed afterwards. 1013 */ 1014 return; 1015 } 1016 #ifdef CONFIG_MMU 1017 if (fixup_exception(&fp->ptregs)) 1018 return; 1019 #endif 1020 bad_super_trap(fp); 1021 return; 1022 } 1023 1024 /* send the appropriate signal to the user program */ 1025 switch (vector) { 1026 case VEC_ADDRERR: 1027 si_code = BUS_ADRALN; 1028 sig = SIGBUS; 1029 break; 1030 case VEC_ILLEGAL: 1031 case VEC_LINE10: 1032 case VEC_LINE11: 1033 si_code = ILL_ILLOPC; 1034 sig = SIGILL; 1035 break; 1036 case VEC_PRIV: 1037 si_code = ILL_PRVOPC; 1038 sig = SIGILL; 1039 break; 1040 case VEC_COPROC: 1041 si_code = ILL_COPROC; 1042 sig = SIGILL; 1043 break; 1044 case VEC_TRAP1: 1045 case VEC_TRAP2: 1046 case VEC_TRAP3: 1047 case VEC_TRAP4: 1048 case VEC_TRAP5: 1049 case VEC_TRAP6: 1050 case VEC_TRAP7: 1051 case VEC_TRAP8: 1052 case VEC_TRAP9: 1053 case VEC_TRAP10: 1054 case VEC_TRAP11: 1055 case VEC_TRAP12: 1056 case VEC_TRAP13: 1057 case VEC_TRAP14: 1058 si_code = ILL_ILLTRP; 1059 sig = SIGILL; 1060 break; 1061 case VEC_FPBRUC: 1062 case VEC_FPOE: 1063 case VEC_FPNAN: 1064 si_code = FPE_FLTINV; 1065 sig = SIGFPE; 1066 break; 1067 case VEC_FPIR: 1068 si_code = FPE_FLTRES; 1069 sig = SIGFPE; 1070 break; 1071 case VEC_FPDIVZ: 1072 si_code = FPE_FLTDIV; 1073 sig = SIGFPE; 1074 break; 1075 case VEC_FPUNDER: 1076 si_code = FPE_FLTUND; 1077 sig = SIGFPE; 1078 break; 1079 case VEC_FPOVER: 1080 si_code = FPE_FLTOVF; 1081 sig = SIGFPE; 1082 break; 1083 case VEC_ZERODIV: 1084 si_code = FPE_INTDIV; 1085 sig = SIGFPE; 1086 break; 1087 case VEC_CHK: 1088 case VEC_TRAP: 1089 si_code = FPE_INTOVF; 1090 sig = SIGFPE; 1091 break; 1092 case VEC_TRACE: /* ptrace single step */ 1093 si_code = TRAP_TRACE; 1094 sig = SIGTRAP; 1095 break; 1096 case VEC_TRAP15: /* breakpoint */ 1097 si_code = TRAP_BRKPT; 1098 sig = SIGTRAP; 1099 break; 1100 default: 1101 si_code = ILL_ILLOPC; 1102 sig = SIGILL; 1103 break; 1104 } 1105 switch (fp->ptregs.format) { 1106 default: 1107 addr = (void __user *) fp->ptregs.pc; 1108 break; 1109 case 2: 1110 addr = (void __user *) fp->un.fmt2.iaddr; 1111 break; 1112 case 7: 1113 addr = (void __user *) fp->un.fmt7.effaddr; 1114 break; 1115 case 9: 1116 addr = (void __user *) fp->un.fmt9.iaddr; 1117 break; 1118 case 10: 1119 addr = (void __user *) fp->un.fmta.daddr; 1120 break; 1121 case 11: 1122 addr = (void __user*) fp->un.fmtb.daddr; 1123 break; 1124 } 1125 force_sig_fault(sig, si_code, addr); 1126 } 1127 1128 void die_if_kernel (char *str, struct pt_regs *fp, int nr) 1129 { 1130 if (!(fp->sr & PS_S)) 1131 return; 1132 1133 console_verbose(); 1134 pr_crit("%s: %08x\n", str, nr); 1135 show_registers(fp); 1136 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 1137 make_task_dead(SIGSEGV); 1138 } 1139 1140 asmlinkage void set_esp0(unsigned long ssp) 1141 { 1142 current->thread.esp0 = ssp; 1143 } 1144 1145 /* 1146 * This function is called if an error occur while accessing 1147 * user-space from the fpsp040 code. 1148 */ 1149 asmlinkage void fpsp040_die(void) 1150 { 1151 force_exit_sig(SIGSEGV); 1152 } 1153 1154 #ifdef CONFIG_M68KFPU_EMU 1155 asmlinkage void fpemu_signal(int signal, int code, void *addr) 1156 { 1157 force_sig_fault(signal, code, addr); 1158 } 1159 #endif 1160