/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * This file contains the exception handler for the address error exception,
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case when the program counter
 * points to an address that is not aligned to a word boundary.
 *
 * Accessing data at unaligned addresses is bad practice even on Intel,
 * where only performance is affected.  Much worse is that such code is not
 * portable.  Because several programs die on MIPS due to alignment
 * problems, I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I intend, however, to disable this sometime in the future, once the
 * alignment problems in user programs have been fixed.  For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per-process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I strongly discourage the use of the
 * software emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable software emulation; any other value
 * enables it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * int main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08x\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 *
 *         return 0;
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#define STR(x)	__STR(x)
#define __STR(x)	#x

#ifdef CONFIG_PROC_FS
unsigned long unaligned_instructions;
#endif
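
/*
 * How the emulation below works, roughly: the handler fetches the faulting
 * instruction from the exception PC, decodes its opcode, and re-executes
 * the access with sequences that can never themselves raise an address
 * error.  Halfwords are assembled from two byte loads/stores; words and
 * doublewords use the lwl/lwr, ldl/ldr, swl/swr and sdl/sdr pairs, which
 * accept arbitrarily aligned addresses.  On a big-endian CPU, for example
 * (illustrative operands):
 *
 *	lwl	rt, 0(addr)	# addr .. end of its aligned word -> high bytes of rt
 *	lwr	rt, 3(addr)	# start of aligned word .. addr+3 -> low bytes of rt
 *
 * leaves the four bytes at addr..addr+3 in rt regardless of the alignment
 * of addr.  Each access that may touch a bad user pointer is listed in the
 * __ex_table section together with a .fixup stub that loads -EFAULT into
 * the result operand and resumes after the access, so a fault during
 * emulation is reported instead of causing a nested exception.
 */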

static inline int emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc,
	unsigned long **regptr, unsigned long *newvalue)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;

	regs->regs[0] = 0;
	*regptr = NULL;

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can therefore assume that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of interest.
	 */
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlb\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlb\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;

	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;

	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		__asm__ __volatile__ (
			".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlbu\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlbu\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the lwl/lwr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"dsll\t%0, %0, 32\n\t"
			"dsrl\t%0, %0, 32\n\t"
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the ldl/ldr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tldl\t%0, (%2)\n"
			"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tldl\t%0, 7(%2)\n"
			"2:\tldr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
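
	/*
	 * The store cases below mirror the loads: the value is taken from
	 * the rt register and written out with byte stores or with the
	 * swl/swr (sdl/sdr) pairs.  As noted in the TODO at the top of this
	 * file, a store that crosses a page boundary may be executed only
	 * partially if the second access faults; undoing the partial store
	 * is not implemented yet.
	 */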

	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 1(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 0(%2)\n\t"
			".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 0(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 1(%2)\n\t"
			".set\tat\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tswl\t%1, (%2)\n"
			"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tswl\t%1, 3(%2)\n"
			"2:\tswr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tsdl\t%1, (%2)\n"
			"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tsdl\t%1, 7(%2)\n"
			"2:\tsdr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/*
		 * I herewith declare: this does not happen.  So send SIGBUS.
		 */
		goto sigbus;

	case lwc2_op:
	case ldc2_op:
	case swc2_op:
	case sdc2_op:
		/*
		 * These are the coprocessor 2 load/stores.  The current
		 * implementations don't use cp2 and cp2 should always be
		 * disabled in c0_status.  So send SIGILL.
		 * (No longer true: the Sony PlayStation uses cp2 for
		 * 3D matrix operations.  Dunno if that thingy has an MMU ...)
		 */
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_PROC_FS
	unaligned_instructions++;
#endif

	return 0;

fault:
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return 1;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	send_sig(SIGSEGV, current, 1);

	return 0;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	send_sig(SIGBUS, current, 1);

	return 0;

sigill:
	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
		      regs);
	send_sig(SIGILL, current, 1);

	return 0;
}
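
/*
 * do_ade() is the C entry point for the address error exception.  It first
 * lets the FPU emulator reclaim control (do_dsemulret), refuses to touch
 * instruction-fetch faults and MIPS16 code, and only emulates the access if
 * the process has the MF_FIXADE flag set.  For faults taken in kernel mode
 * the address limit is temporarily raised so the emulation may access
 * kernel addresses.  On success the EPC is advanced past the faulting
 * instruction (honouring branch delay slots via compute_return_epc) and the
 * destination register, if any, is updated.
 */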

asmlinkage void do_ade(struct pt_regs *regs)
{
	unsigned long *regptr, newval;
	extern int do_dsemulret(struct pt_regs *);
	unsigned int __user *pc;
	mm_segment_t seg;

	/*
	 * Address errors may be deliberately induced by the FPU emulator to
	 * retake control of the CPU after executing the instruction in the
	 * delay slot of an emulated branch.
	 */
	/* Terminate if exception was recognized as a delay slot return */
	if (do_dsemulret(regs))
		return;

	/* Otherwise handle as normal */

	/*
	 * Did we catch a fault trying to load an instruction?
	 * Or are we running in MIPS16 mode?
	 */
	if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
		goto sigbus;

	pc = (unsigned int __user *) exception_epc(regs);
	if ((current->thread.mflags & MF_FIXADE) == 0)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all rather ugly ...
	 */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	if (!emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr,
				     pc, &regptr, &newval)) {
		compute_return_epc(regs);
		/*
		 * Now that the branch has been evaluated, update the
		 * destination register if necessary.
		 */
		if (regptr)
			*regptr = newval;
	}
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
}