/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This file contains the exception handler for the address error exception
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case when the program counter
 * points to an address not aligned to a word boundary.
 *
 * Writing data to unaligned addresses is bad practice even on Intel where
 * only the performance is affected.  Much worse is that such code is non-
 * portable.  Because several programs die on MIPS due to alignment
 * problems, I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * However, I intend to disable this at some point in the future when the
 * alignment problems with user programs have been fixed.  For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I discourage the use of the software
 * emulation strongly - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable software emulation; any other value
 * enables it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *	unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *	struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *	unsigned int *p = (unsigned int *) (x.bar + 3);
 *	int i;
 *
 *	if (argc > 1)
 *		sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *	printf("*p = %08lx\n", *p);
 *
 *	*p = 0xdeadface;
 *
 *	for(i = 0; i <= 7; i++)
 *		printf("%02x ", x.bar[i]);
 *	printf("\n");
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *	 exception for the R6000.
 *	 A store crossing a page boundary might be executed only partially.
 *	 Undo the partial store in this case.
 */
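/*
 * Rough flow of the emulation implemented below: read the faulting
 * instruction from the EPC, decode it, verify the access with
 * access_ok(), perform the load or store piecewise using the
 * exception-table protected macros further down, update the destination
 * register and advance the EPC past the instruction (and past the branch
 * it may have been in the delay slot of).  If the emulated access itself
 * faults, the original EPC and $31 are restored and a signal is
 * delivered instead.
 */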
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/uaccess.h>

#define STR(x)	__STR(x)
#define __STR(x)	#x

enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);

#ifdef __BIG_ENDIAN
#define _LoadHW(addr, value, res, type) \
do { \
	__asm__ __volatile__ (".set\tnoat\n" \
		"1:\t"type##_lb("%0", "0(%2)")"\n" \
		"2:\t"type##_lbu("$1", "1(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		"3:\t.set\tat\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#ifndef CONFIG_CPU_MIPSR6
#define _LoadW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		"1:\t"type##_lwl("%0", "(%2)")"\n" \
		"2:\t"type##_lwr("%0", "3(%2)")"\n\t" \
		"li\t%1, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#else
/* MIPSR6 has no lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n" \
		".set\tnoat\n\t" \
		"1:"type##_lb("%0", "0(%2)")"\n\t" \
		"2:"type##_lbu("$1", "1(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"3:"type##_lbu("$1", "2(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"4:"type##_lbu("$1", "3(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		".set\tpop\n" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%1, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */

#define _LoadHWU(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tnoat\n" \
		"1:\t"type##_lbu("%0", "0(%2)")"\n" \
		"2:\t"type##_lbu("$1", "1(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".set\tat\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

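/*
 * All of the access macros in this file follow the same pattern: the
 * numeric labels (1:, 2:, ...) mark the individual partial accesses and
 * each one gets an __ex_table entry pointing at the .fixup stub.  If an
 * access faults, the exception handler branches to the stub, which sets
 * res to -EFAULT and jumps back past the sequence; on success res is set
 * to 0.  Callers only check res afterwards.
 */
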
#ifndef CONFIG_CPU_MIPSR6
#define _LoadWU(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		"1:\t"type##_lwl("%0", "(%2)")"\n" \
		"2:\t"type##_lwr("%0", "3(%2)")"\n\t" \
		"dsll\t%0, %0, 32\n\t" \
		"dsrl\t%0, %0, 32\n\t" \
		"li\t%1, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		"\t.section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#define _LoadDW(addr, value, res) \
do { \
	__asm__ __volatile__ ( \
		"1:\tldl\t%0, (%2)\n" \
		"2:\tldr\t%0, 7(%2)\n\t" \
		"li\t%1, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		"\t.section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#else
/* MIPSR6 has no lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n\t" \
		".set\tnoat\n\t" \
		"1:"type##_lbu("%0", "0(%2)")"\n\t" \
		"2:"type##_lbu("$1", "1(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"3:"type##_lbu("$1", "2(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"4:"type##_lbu("$1", "3(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		".set\tpop\n" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%1, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#define _LoadDW(addr, value, res) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n\t" \
		".set\tnoat\n\t" \
		"1:lb\t%0, 0(%2)\n\t" \
		"2:lbu\t $1, 1(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"3:lbu\t$1, 2(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"4:lbu\t$1, 3(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"5:lbu\t$1, 4(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"6:lbu\t$1, 5(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"7:lbu\t$1, 6(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"8:lbu\t$1, 7(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		".set\tpop\n\t" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%1, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		STR(PTR)"\t5b, 11b\n\t" \
		STR(PTR)"\t6b, 11b\n\t" \
		STR(PTR)"\t7b, 11b\n\t" \
		STR(PTR)"\t8b, 11b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */


#define _StoreHW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tnoat\n" \
		"1:\t"type##_sb("%1", "1(%2)")"\n" \
		"srl\t$1, %1, 0x8\n" \
		"2:\t"type##_sb("$1", "0(%2)")"\n" \
		".set\tat\n\t" \
		"li\t%0, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%0, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)

#ifndef CONFIG_CPU_MIPSR6
#define _StoreW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		"1:\t"type##_swl("%1", "(%2)")"\n" \
		"2:\t"type##_swr("%1", "3(%2)")"\n\t" \
		"li\t%0, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%0, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)

#define _StoreDW(addr, value, res) \
do { \
	__asm__ __volatile__ ( \
		"1:\tsdl\t%1,(%2)\n" \
		"2:\tsdr\t%1, 7(%2)\n\t" \
		"li\t%0, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%0, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)

#else
/* MIPSR6 has no swl and sdl instructions */
#define _StoreW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n\t" \
		".set\tnoat\n\t" \
		"1:"type##_sb("%1", "3(%2)")"\n\t" \
		"srl\t$1, %1, 0x8\n\t" \
		"2:"type##_sb("$1", "2(%2)")"\n\t" \
		"srl\t$1, $1, 0x8\n\t" \
		"3:"type##_sb("$1", "1(%2)")"\n\t" \
		"srl\t$1, $1, 0x8\n\t" \
		"4:"type##_sb("$1", "0(%2)")"\n\t" \
		".set\tpop\n\t" \
		"li\t%0, 0\n" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%0, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		".previous" \
		: "=&r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT) \
		: "memory"); \
} while(0)

#define _StoreDW(addr, value, res) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n\t" \
		".set\tnoat\n\t" \
		"1:sb\t%1, 7(%2)\n\t" \
		"dsrl\t$1, %1, 0x8\n\t" \
		"2:sb\t$1, 6(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"3:sb\t$1, 5(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"4:sb\t$1, 4(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"5:sb\t$1, 3(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"6:sb\t$1, 2(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"7:sb\t$1, 1(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"8:sb\t$1, 0(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		".set\tpop\n\t" \
		"li\t%0, 0\n" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%0, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		STR(PTR)"\t5b, 11b\n\t" \
		STR(PTR)"\t6b, 11b\n\t" \
		STR(PTR)"\t7b, 11b\n\t" \
		STR(PTR)"\t8b, 11b\n\t" \
		".previous" \
		: "=&r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT) \
		: "memory"); \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */

#else /* __BIG_ENDIAN */

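/*
 * The little-endian variants below mirror the big-endian ones above,
 * with the byte offsets reversed.  For the byte-by-byte (MIPSR6) word
 * load, the equivalent C is roughly (p being a byte pointer to addr):
 *
 *	value = (s8)p[3];
 *	value = (value << 8) | p[2];
 *	value = (value << 8) | p[1];
 *	value = (value << 8) | p[0];
 *
 * i.e. the most significant byte is read first and the lower bytes are
 * shifted in one at a time.
 */
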
#define _LoadHW(addr, value, res, type) \
do { \
	__asm__ __volatile__ (".set\tnoat\n" \
		"1:\t"type##_lb("%0", "1(%2)")"\n" \
		"2:\t"type##_lbu("$1", "0(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		"3:\t.set\tat\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#ifndef CONFIG_CPU_MIPSR6
#define _LoadW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		"1:\t"type##_lwl("%0", "3(%2)")"\n" \
		"2:\t"type##_lwr("%0", "(%2)")"\n\t" \
		"li\t%1, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#else
/* MIPSR6 has no lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n" \
		".set\tnoat\n\t" \
		"1:"type##_lb("%0", "3(%2)")"\n\t" \
		"2:"type##_lbu("$1", "2(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"3:"type##_lbu("$1", "1(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"4:"type##_lbu("$1", "0(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		".set\tpop\n" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%1, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */


#define _LoadHWU(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tnoat\n" \
		"1:\t"type##_lbu("%0", "1(%2)")"\n" \
		"2:\t"type##_lbu("$1", "0(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".set\tat\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#ifndef CONFIG_CPU_MIPSR6
#define _LoadWU(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		"1:\t"type##_lwl("%0", "3(%2)")"\n" \
		"2:\t"type##_lwr("%0", "(%2)")"\n\t" \
		"dsll\t%0, %0, 32\n\t" \
		"dsrl\t%0, %0, 32\n\t" \
		"li\t%1, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		"\t.section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#define _LoadDW(addr, value, res) \
do { \
	__asm__ __volatile__ ( \
		"1:\tldl\t%0, 7(%2)\n" \
		"2:\tldr\t%0, (%2)\n\t" \
		"li\t%1, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		"\t.section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%1, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#else
/* MIPSR6 has no lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n\t" \
		".set\tnoat\n\t" \
		"1:"type##_lbu("%0", "3(%2)")"\n\t" \
		"2:"type##_lbu("$1", "2(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"3:"type##_lbu("$1", "1(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"4:"type##_lbu("$1", "0(%2)")"\n\t" \
		"sll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		".set\tpop\n" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%1, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)

#define _LoadDW(addr, value, res) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n\t" \
		".set\tnoat\n\t" \
		"1:lb\t%0, 7(%2)\n\t" \
		"2:lbu\t$1, 6(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"3:lbu\t$1, 5(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"4:lbu\t$1, 4(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"5:lbu\t$1, 3(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"6:lbu\t$1, 2(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"7:lbu\t$1, 1(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"8:lbu\t$1, 0(%2)\n\t" \
		"dsll\t%0, 0x8\n\t" \
		"or\t%0, $1\n\t" \
		"li\t%1, 0\n" \
		".set\tpop\n\t" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%1, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		STR(PTR)"\t5b, 11b\n\t" \
		STR(PTR)"\t6b, 11b\n\t" \
		STR(PTR)"\t7b, 11b\n\t" \
		STR(PTR)"\t8b, 11b\n\t" \
		".previous" \
		: "=&r" (value), "=r" (res) \
		: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* CONFIG_CPU_MIPSR6 */

#define _StoreHW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tnoat\n" \
		"1:\t"type##_sb("%1", "0(%2)")"\n" \
		"srl\t$1,%1, 0x8\n" \
		"2:\t"type##_sb("$1", "1(%2)")"\n" \
		".set\tat\n\t" \
		"li\t%0, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%0, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)

#ifndef CONFIG_CPU_MIPSR6
#define _StoreW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		"1:\t"type##_swl("%1", "3(%2)")"\n" \
		"2:\t"type##_swr("%1", "(%2)")"\n\t" \
		"li\t%0, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%0, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)

#define _StoreDW(addr, value, res) \
do { \
	__asm__ __volatile__ ( \
		"1:\tsdl\t%1, 7(%2)\n" \
		"2:\tsdr\t%1, (%2)\n\t" \
		"li\t%0, 0\n" \
		"3:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"4:\tli\t%0, %3\n\t" \
		"j\t3b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 4b\n\t" \
		STR(PTR)"\t2b, 4b\n\t" \
		".previous" \
		: "=r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)

#else
/* MIPSR6 has no swl and sdl instructions */
#define _StoreW(addr, value, res, type) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n\t" \
		".set\tnoat\n\t" \
		"1:"type##_sb("%1", "0(%2)")"\n\t" \
		"srl\t$1, %1, 0x8\n\t" \
		"2:"type##_sb("$1", "1(%2)")"\n\t" \
		"srl\t$1, $1, 0x8\n\t" \
		"3:"type##_sb("$1", "2(%2)")"\n\t" \
		"srl\t$1, $1, 0x8\n\t" \
		"4:"type##_sb("$1", "3(%2)")"\n\t" \
		".set\tpop\n\t" \
		"li\t%0, 0\n" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%0, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		".previous" \
		: "=&r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT) \
		: "memory"); \
} while(0)

#define _StoreDW(addr, value, res) \
do { \
	__asm__ __volatile__ ( \
		".set\tpush\n\t" \
		".set\tnoat\n\t" \
		"1:sb\t%1, 0(%2)\n\t" \
		"dsrl\t$1, %1, 0x8\n\t" \
		"2:sb\t$1, 1(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"3:sb\t$1, 2(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"4:sb\t$1, 3(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"5:sb\t$1, 4(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"6:sb\t$1, 5(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"7:sb\t$1, 6(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		"8:sb\t$1, 7(%2)\n\t" \
		"dsrl\t$1, $1, 0x8\n\t" \
		".set\tpop\n\t" \
		"li\t%0, 0\n" \
		"10:\n\t" \
		".insn\n\t" \
		".section\t.fixup,\"ax\"\n\t" \
		"11:\tli\t%0, %3\n\t" \
		"j\t10b\n\t" \
		".previous\n\t" \
		".section\t__ex_table,\"a\"\n\t" \
		STR(PTR)"\t1b, 11b\n\t" \
		STR(PTR)"\t2b, 11b\n\t" \
		STR(PTR)"\t3b, 11b\n\t" \
		STR(PTR)"\t4b, 11b\n\t" \
		STR(PTR)"\t5b, 11b\n\t" \
		STR(PTR)"\t6b, 11b\n\t" \
		STR(PTR)"\t7b, 11b\n\t" \
		STR(PTR)"\t8b, 11b\n\t" \
		".previous" \
		: "=&r" (res) \
		: "r" (value), "r" (addr), "i" (-EFAULT) \
		: "memory"); \
} while(0)

#endif /* CONFIG_CPU_MIPSR6 */
#endif

#define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
#define LoadHWUE(addr, value, res)	_LoadHWU(addr, value, res, user)
#define LoadWU(addr, value, res)	_LoadWU(addr, value, res, kernel)
#define LoadWUE(addr, value, res)	_LoadWU(addr, value, res, user)
#define LoadHW(addr, value, res)	_LoadHW(addr, value, res, kernel)
#define LoadHWE(addr, value, res)	_LoadHW(addr, value, res, user)
#define LoadW(addr, value, res)		_LoadW(addr, value, res, kernel)
#define LoadWE(addr, value, res)	_LoadW(addr, value, res, user)
#define LoadDW(addr, value, res)	_LoadDW(addr, value, res)

#define StoreHW(addr, value, res)	_StoreHW(addr, value, res, kernel)
#define StoreHWE(addr, value, res)	_StoreHW(addr, value, res, user)
#define StoreW(addr, value, res)	_StoreW(addr, value, res, kernel)
#define StoreWE(addr, value, res)	_StoreW(addr, value, res, user)
#define StoreDW(addr, value, res)	_StoreDW(addr, value, res)
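
/*
 * The plain wrappers above use the kernel flavour of the accessors,
 * while the *E variants pass "user" instead, which selects the EVA
 * user-memory accessors.  The *E forms are used further down whenever a
 * kernel-mode fault has to touch a user address on an EVA kernel.
 */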

static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;
	unsigned long origpc;
	unsigned long orig31;
	void __user *fault_addr = NULL;
#ifdef CONFIG_EVA
	mm_segment_t seg;
#endif
	union fpureg *fpr;
	enum msa_2b_fmt df;
	unsigned int wd;

	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of
	 * interest.
	 */
#ifdef CONFIG_EVA
	case spec3_op:
		/*
		 * We can land here only from the kernel accessing user
		 * memory, so we need to "switch" the address limit to
		 * user space so that the address check works properly.
		 */
		seg = get_fs();
		set_fs(USER_DS);
		switch (insn.spec3_format.func) {
		case lhe_op:
			if (!access_ok(VERIFY_READ, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadHWE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lwe_op:
			if (!access_ok(VERIFY_READ, addr, 4)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadWE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lhue_op:
			if (!access_ok(VERIFY_READ, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadHWUE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case she_op:
			if (!access_ok(VERIFY_WRITE, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreHWE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			break;
		case swe_op:
			if (!access_ok(VERIFY_WRITE, addr, 4)) {
				set_fs(seg);
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreWE(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			break;
		default:
			set_fs(seg);
			goto sigill;
		}
		set_fs(seg);
		break;
#endif
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				LoadHW(addr, value, res);
			else
				LoadHWE(addr, value, res);
		} else {
			LoadHW(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				LoadW(addr, value, res);
			else
				LoadWE(addr, value, res);
		} else {
			LoadW(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				LoadHWU(addr, value, res);
			else
				LoadHWUE(addr, value, res);
		} else {
			LoadHWU(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				StoreHW(addr, value, res);
			else
				StoreHWE(addr, value, res);
		} else {
			StoreHW(addr, value, res);
		}

		if (res)
			goto fault;
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (config_enabled(CONFIG_EVA)) {
			if (segment_eq(get_fs(), get_ds()))
				StoreW(addr, value, res);
			else
				StoreWE(addr, value, res);
		} else {
			StoreW(addr, value, res);
		}

		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		lose_fpu(1);	/* Save FPU state for the emulator. */
		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		return;

	case msa_op:
		if (!cpu_has_msa)
			goto sigill;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];

		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Disable preemption to avoid a race between copying
			 * state from userland, migrating to another CPU and
			 * updating the hardware vector register below.
			 */
			preempt_disable();

			res = __copy_from_user_inatomic(fpr, addr,
							sizeof(*fpr));
			if (res)
				goto fault;

			/*
			 * Update the hardware register if it is in use by the
			 * task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			if (test_thread_flag(TIF_USEDMSA))
				write_msa_wr(wd, fpr, df);

			preempt_enable();
			break;

		case msa_st_op:
			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to the implementor for application-specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6.
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}

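/*
 * The microMIPS and MIPS16e handlers below decode the compressed
 * instruction formats themselves.  Their 3-bit register fields do not
 * name GPRs directly, so the tables below translate them to full
 * register numbers ($16, $17, $2..$7; the store table maps entry 0 to
 * $0 instead of $16).
 */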
/* Recode table from 16-bit register notation to 32-bit GPR. */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };

static void emulate_load_store_microMIPS(struct pt_regs *regs,
					 void __user *addr)
{
	unsigned long value;
	unsigned int res;
	int i;
	unsigned int reg = 0, rvar;
	unsigned long orig31;
	u16 __user *pc16;
	u16 halfword;
	unsigned int word;
	unsigned long origpc, contpc;
	union mips_instruction insn;
	struct mm_decoded_insn mminsn;
	void __user *fault_addr = NULL;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];

	mminsn.micro_mips_mode = 1;

	/*
	 * This load never faults.
	 */
	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
	__get_user(halfword, pc16);
	pc16++;
	contpc = regs->cp0_epc + 2;
	word = ((unsigned int)halfword << 16);
	mminsn.pc_inc = 2;

	if (!mm_insn_16bit(halfword)) {
		__get_user(halfword, pc16);
		pc16++;
		contpc = regs->cp0_epc + 4;
		mminsn.pc_inc = 4;
		word |= halfword;
	}
	mminsn.insn = word;

	if (get_user(halfword, pc16))
		goto fault;
	mminsn.next_pc_inc = 2;
	word = ((unsigned int)halfword << 16);

	if (!mm_insn_16bit(halfword)) {
		pc16++;
		if (get_user(halfword, pc16))
			goto fault;
		mminsn.next_pc_inc = 4;
		word |= halfword;
	}
	mminsn.next_insn = word;

	insn = (union mips_instruction)(mminsn.insn);
	if (mm_isBranchInstr(regs, mminsn, &contpc))
		insn = (union mips_instruction)(mminsn.next_insn);

	/* Parse instruction to find what to do */

	switch (insn.mm_i_format.opcode) {

	case mm_pool32a_op:
		switch (insn.mm_x_format.func) {
		case mm_lwxs_op:
			reg = insn.mm_x_format.rd;
			goto loadW;
		}

		goto sigbus;

	case mm_pool32b_op:
		switch (insn.mm_m_format.func) {
		case mm_lwp_func:
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (!access_ok(VERIFY_READ, addr, 8))
				goto sigbus;

			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg] = value;
			addr += 4;
			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg + 1] = value;
			goto success;

		case mm_swp_func:
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (!access_ok(VERIFY_WRITE, addr, 8))
				goto sigbus;

			value = regs->regs[reg];
			StoreW(addr, value, res);
			if (res)
				goto fault;
			addr += 4;
			value = regs->regs[reg + 1];
			StoreW(addr, value, res);
			if (res)
				goto fault;
			goto success;

		case mm_ldp_func:
#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (!access_ok(VERIFY_READ, addr, 16))
				goto sigbus;

			LoadDW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg] = value;
			addr += 8;
			LoadDW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg + 1] = value;
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

		case mm_sdp_func:
#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (!access_ok(VERIFY_WRITE, addr, 16))
				goto sigbus;

			value = regs->regs[reg];
			StoreDW(addr, value, res);
			if (res)
				goto fault;
			addr += 8;
			value = regs->regs[reg + 1];
			StoreDW(addr, value, res);
			if (res)
				goto fault;
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

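		/*
		 * For the LWM32/SWM32 (and LDM/SDM) cases below, rd is a
		 * register list: the low four bits give the number of
		 * registers saved/restored starting at $16 (the value 9
		 * means $16-$23 plus $30), and bit 4 adds $31.
		 */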
		case mm_lwm32_func:
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok
				    (VERIFY_READ, addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;
			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				regs->regs[31] = value;
			}
			goto success;

		case mm_swm32_func:
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok
				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;
			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreW(addr, value, res);
				if (res)
					goto fault;
			}
			goto success;

		case mm_ldm_func:
#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok
				    (VERIFY_READ, addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;

			for (i = 16; rvar; rvar--, i++) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				regs->regs[31] = value;
			}
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

		case mm_sdm_func:
#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok
				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
			}
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

			/* LWC2, SWC2, LDC2, SDC2 are not serviced */
		}

		goto sigbus;

	case mm_pool32c_op:
		switch (insn.mm_m_format.func) {
		case mm_lwu_func:
			reg = insn.mm_m_format.rd;
			goto loadWU;
		}

		/* LL,SC,LLD,SCD are not serviced */
		goto sigbus;

	case mm_pool32f_op:
		switch (insn.mm_x_format.func) {
		case mm_lwxc1_func:
		case mm_swxc1_func:
		case mm_ldxc1_func:
		case mm_sdxc1_func:
			goto fpu_emul;
		}

		goto sigbus;

	case mm_ldc132_op:
	case mm_sdc132_op:
	case mm_lwc132_op:
	case mm_swc132_op:
fpu_emul:
		/* roll back jump/branch */
		regs->cp0_epc = origpc;
		regs->regs[31] = orig31;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());
		BUG_ON(!is_fpu_owner());

		lose_fpu(1);	/* save the FPU state for the emulator */
		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* restore FPU state */

		/* If something went wrong, signal */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			goto success;
		return;

	case mm_lh32_op:
		reg = insn.mm_i_format.rt;
		goto loadHW;

	case mm_lhu32_op:
		reg = insn.mm_i_format.rt;
		goto loadHWU;

	case mm_lw32_op:
		reg = insn.mm_i_format.rt;
		goto loadW;

	case mm_sh32_op:
		reg = insn.mm_i_format.rt;
		goto storeHW;

	case mm_sw32_op:
		reg = insn.mm_i_format.rt;
		goto storeW;

	case mm_ld32_op:
		reg = insn.mm_i_format.rt;
		goto loadDW;

	case mm_sd32_op:
		reg = insn.mm_i_format.rt;
		goto storeDW;

	case mm_pool16c_op:
		switch (insn.mm16_m_format.func) {
		case mm_lwm16_op:
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[31] = value;

			goto success;

		case mm_swm16_op:
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			value = regs->regs[31];
			StoreW(addr, value, res);
			if (res)
				goto fault;

			goto success;

		}

		goto sigbus;

	case mm_lhu16_op:
		reg = reg16to32[insn.mm16_rb_format.rt];
		goto loadHWU;

	case mm_lw16_op:
		reg = reg16to32[insn.mm16_rb_format.rt];
		goto loadW;

	case mm_sh16_op:
		reg = reg16to32st[insn.mm16_rb_format.rt];
		goto storeHW;

	case mm_sw16_op:
		reg = reg16to32st[insn.mm16_rb_format.rt];
		goto storeW;

	case mm_lwsp16_op:
		reg = insn.mm16_r5_format.rt;
		goto loadW;

	case mm_swsp16_op:
		reg = insn.mm16_r5_format.rt;
		goto storeW;

	case mm_lwgp16_op:
		reg = reg16to32[insn.mm16_r3_format.rt];
		goto loadW;

	default:
		goto sigill;
	}

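	/*
	 * Common tails for the cases above: the decode switch has set up
	 * "reg" and left "addr" pointing at the unaligned operand; the
	 * blocks below perform the access check, the emulated access and
	 * the register update, then jump to "success".
	 */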
loadHW:
	if (!access_ok(VERIFY_READ, addr, 2))
		goto sigbus;

	LoadHW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadHWU:
	if (!access_ok(VERIFY_READ, addr, 2))
		goto sigbus;

	LoadHWU(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadW:
	if (!access_ok(VERIFY_READ, addr, 4))
		goto sigbus;

	LoadW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadWU:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(VERIFY_READ, addr, 4))
		goto sigbus;

	LoadWU(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

loadDW:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(VERIFY_READ, addr, 8))
		goto sigbus;

	LoadDW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

storeHW:
	if (!access_ok(VERIFY_WRITE, addr, 2))
		goto sigbus;

	value = regs->regs[reg];
	StoreHW(addr, value, res);
	if (res)
		goto fault;
	goto success;

storeW:
	if (!access_ok(VERIFY_WRITE, addr, 4))
		goto sigbus;

	value = regs->regs[reg];
	StoreW(addr, value, res);
	if (res)
		goto fault;
	goto success;

storeDW:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(VERIFY_WRITE, addr, 8))
		goto sigbus;

	value = regs->regs[reg];
	StoreDW(addr, value, res);
	if (res)
		goto fault;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

success:
	regs->cp0_epc = contpc;	/* advance or branch */

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif
	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}

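/*
 * MIPS16e is handled much like microMIPS above: the 16-bit instruction
 * (skipping an EXTEND prefix, or the jump it sits behind when the fault
 * happened in a delay slot) is fetched from the masked EPC, its
 * compressed register field is mapped through reg16to32[], and the
 * access itself is emulated with the same load/store helpers.
 */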
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	switch (mips16inst.ri.opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	case MIPS16e_swsp_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	switch (mips16inst.ri.opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}

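/*
 * do_ade() is the entry point for address error exceptions.  It sends
 * SIGBUS straight away if the fault was on the instruction fetch itself,
 * if a user task has not asked for fixups (TIF_FIXADE), or if the
 * debugfs "unaligned_action" requests a signal; otherwise it picks the
 * decoder matching the ISA mode of the EPC (microMIPS, MIPS16e or
 * classic MIPS) and temporarily switches to KERNEL_DS for faults taken
 * in kernel mode.
 */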
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *pc;
	mm_segment_t seg;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);
	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so very ugly ...
	 */

	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mmips) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		if (cpu_has_mips16) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		goto sigbus;
	}

	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int __user *)exception_epc(regs);

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
	exception_exit(prev_state);
}

#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
arch_initcall(debugfs_unaligned);
#endif