1 /* 2 * Handle unaligned accesses by emulation. 3 * 4 * This file is subject to the terms and conditions of the GNU General Public 5 * License. See the file "COPYING" in the main directory of this archive 6 * for more details. 7 * 8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle 9 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Copyright (C) 2014 Imagination Technologies Ltd. 11 * 12 * This file contains exception handler for address error exception with the 13 * special capability to execute faulting instructions in software. The 14 * handler does not try to handle the case when the program counter points 15 * to an address not aligned to a word boundary. 16 * 17 * Putting data to unaligned addresses is a bad practice even on Intel where 18 * only the performance is affected. Much worse is that such code is non- 19 * portable. Due to several programs that die on MIPS due to alignment 20 * problems I decided to implement this handler anyway though I originally 21 * didn't intend to do this at all for user code. 22 * 23 * For now I enable fixing of address errors by default to make life easier. 24 * I however intend to disable this somewhen in the future when the alignment 25 * problems with user programs have been fixed. For programmers this is the 26 * right way to go. 27 * 28 * Fixing address errors is a per process option. The option is inherited 29 * across fork(2) and execve(2) calls. If you really want to use the 30 * option in your user programs - I discourage the use of the software 31 * emulation strongly - use the following code in your userland stuff: 32 * 33 * #include <sys/sysmips.h> 34 * 35 * ... 36 * sysmips(MIPS_FIXADE, x); 37 * ... 38 * 39 * The argument x is 0 for disabling software emulation, enabled otherwise. 40 * 41 * Below a little program to play around with this feature. 
42 * 43 * #include <stdio.h> 44 * #include <sys/sysmips.h> 45 * 46 * struct foo { 47 * unsigned char bar[8]; 48 * }; 49 * 50 * main(int argc, char *argv[]) 51 * { 52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; 53 * unsigned int *p = (unsigned int *) (x.bar + 3); 54 * int i; 55 * 56 * if (argc > 1) 57 * sysmips(MIPS_FIXADE, atoi(argv[1])); 58 * 59 * printf("*p = %08lx\n", *p); 60 * 61 * *p = 0xdeadface; 62 * 63 * for(i = 0; i <= 7; i++) 64 * printf("%02x ", x.bar[i]); 65 * printf("\n"); 66 * } 67 * 68 * Coprocessor loads are not supported; I think this case is unimportant 69 * in the practice. 70 * 71 * TODO: Handle ndc (attempted store to doubleword in uncached memory) 72 * exception for the R6000. 73 * A store crossing a page boundary might be executed only partially. 74 * Undo the partial store in this case. 75 */ 76 #include <linux/context_tracking.h> 77 #include <linux/mm.h> 78 #include <linux/signal.h> 79 #include <linux/smp.h> 80 #include <linux/sched.h> 81 #include <linux/debugfs.h> 82 #include <linux/perf_event.h> 83 84 #include <asm/asm.h> 85 #include <asm/branch.h> 86 #include <asm/byteorder.h> 87 #include <asm/cop2.h> 88 #include <asm/fpu.h> 89 #include <asm/fpu_emulator.h> 90 #include <asm/inst.h> 91 #include <asm/uaccess.h> 92 93 #define STR(x) __STR(x) 94 #define __STR(x) #x 95 96 enum { 97 UNALIGNED_ACTION_QUIET, 98 UNALIGNED_ACTION_SIGNAL, 99 UNALIGNED_ACTION_SHOW, 100 }; 101 #ifdef CONFIG_DEBUG_FS 102 static u32 unaligned_instructions; 103 static u32 unaligned_action; 104 #else 105 #define unaligned_action UNALIGNED_ACTION_QUIET 106 #endif 107 extern void show_registers(struct pt_regs *regs); 108 109 #ifdef __BIG_ENDIAN 110 #define LoadHW(addr, value, res) \ 111 __asm__ __volatile__ (".set\tnoat\n" \ 112 "1:\t"user_lb("%0", "0(%2)")"\n" \ 113 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ 114 "sll\t%0, 0x8\n\t" \ 115 "or\t%0, $1\n\t" \ 116 "li\t%1, 0\n" \ 117 "3:\t.set\tat\n\t" \ 118 ".insn\n\t" \ 119 ".section\t.fixup,\"ax\"\n\t" \ 120 "4:\tli\t%1, %3\n\t" 
\ 121 "j\t3b\n\t" \ 122 ".previous\n\t" \ 123 ".section\t__ex_table,\"a\"\n\t" \ 124 STR(PTR)"\t1b, 4b\n\t" \ 125 STR(PTR)"\t2b, 4b\n\t" \ 126 ".previous" \ 127 : "=&r" (value), "=r" (res) \ 128 : "r" (addr), "i" (-EFAULT)); 129 130 #ifndef CONFIG_CPU_MIPSR6 131 #define LoadW(addr, value, res) \ 132 __asm__ __volatile__ ( \ 133 "1:\t"user_lwl("%0", "(%2)")"\n" \ 134 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ 135 "li\t%1, 0\n" \ 136 "3:\n\t" \ 137 ".insn\n\t" \ 138 ".section\t.fixup,\"ax\"\n\t" \ 139 "4:\tli\t%1, %3\n\t" \ 140 "j\t3b\n\t" \ 141 ".previous\n\t" \ 142 ".section\t__ex_table,\"a\"\n\t" \ 143 STR(PTR)"\t1b, 4b\n\t" \ 144 STR(PTR)"\t2b, 4b\n\t" \ 145 ".previous" \ 146 : "=&r" (value), "=r" (res) \ 147 : "r" (addr), "i" (-EFAULT)); 148 #else 149 /* MIPSR6 has no lwl instruction */ 150 #define LoadW(addr, value, res) \ 151 __asm__ __volatile__ ( \ 152 ".set\tpush\n" \ 153 ".set\tnoat\n\t" \ 154 "1:"user_lb("%0", "0(%2)")"\n\t" \ 155 "2:"user_lbu("$1", "1(%2)")"\n\t" \ 156 "sll\t%0, 0x8\n\t" \ 157 "or\t%0, $1\n\t" \ 158 "3:"user_lbu("$1", "2(%2)")"\n\t" \ 159 "sll\t%0, 0x8\n\t" \ 160 "or\t%0, $1\n\t" \ 161 "4:"user_lbu("$1", "3(%2)")"\n\t" \ 162 "sll\t%0, 0x8\n\t" \ 163 "or\t%0, $1\n\t" \ 164 "li\t%1, 0\n" \ 165 ".set\tpop\n" \ 166 "10:\n\t" \ 167 ".insn\n\t" \ 168 ".section\t.fixup,\"ax\"\n\t" \ 169 "11:\tli\t%1, %3\n\t" \ 170 "j\t10b\n\t" \ 171 ".previous\n\t" \ 172 ".section\t__ex_table,\"a\"\n\t" \ 173 STR(PTR)"\t1b, 11b\n\t" \ 174 STR(PTR)"\t2b, 11b\n\t" \ 175 STR(PTR)"\t3b, 11b\n\t" \ 176 STR(PTR)"\t4b, 11b\n\t" \ 177 ".previous" \ 178 : "=&r" (value), "=r" (res) \ 179 : "r" (addr), "i" (-EFAULT)); 180 #endif /* CONFIG_CPU_MIPSR6 */ 181 182 #define LoadHWU(addr, value, res) \ 183 __asm__ __volatile__ ( \ 184 ".set\tnoat\n" \ 185 "1:\t"user_lbu("%0", "0(%2)")"\n" \ 186 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ 187 "sll\t%0, 0x8\n\t" \ 188 "or\t%0, $1\n\t" \ 189 "li\t%1, 0\n" \ 190 "3:\n\t" \ 191 ".insn\n\t" \ 192 ".set\tat\n\t" \ 193 
".section\t.fixup,\"ax\"\n\t" \ 194 "4:\tli\t%1, %3\n\t" \ 195 "j\t3b\n\t" \ 196 ".previous\n\t" \ 197 ".section\t__ex_table,\"a\"\n\t" \ 198 STR(PTR)"\t1b, 4b\n\t" \ 199 STR(PTR)"\t2b, 4b\n\t" \ 200 ".previous" \ 201 : "=&r" (value), "=r" (res) \ 202 : "r" (addr), "i" (-EFAULT)); 203 204 #ifndef CONFIG_CPU_MIPSR6 205 #define LoadWU(addr, value, res) \ 206 __asm__ __volatile__ ( \ 207 "1:\t"user_lwl("%0", "(%2)")"\n" \ 208 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ 209 "dsll\t%0, %0, 32\n\t" \ 210 "dsrl\t%0, %0, 32\n\t" \ 211 "li\t%1, 0\n" \ 212 "3:\n\t" \ 213 ".insn\n\t" \ 214 "\t.section\t.fixup,\"ax\"\n\t" \ 215 "4:\tli\t%1, %3\n\t" \ 216 "j\t3b\n\t" \ 217 ".previous\n\t" \ 218 ".section\t__ex_table,\"a\"\n\t" \ 219 STR(PTR)"\t1b, 4b\n\t" \ 220 STR(PTR)"\t2b, 4b\n\t" \ 221 ".previous" \ 222 : "=&r" (value), "=r" (res) \ 223 : "r" (addr), "i" (-EFAULT)); 224 225 #define LoadDW(addr, value, res) \ 226 __asm__ __volatile__ ( \ 227 "1:\tldl\t%0, (%2)\n" \ 228 "2:\tldr\t%0, 7(%2)\n\t" \ 229 "li\t%1, 0\n" \ 230 "3:\n\t" \ 231 ".insn\n\t" \ 232 "\t.section\t.fixup,\"ax\"\n\t" \ 233 "4:\tli\t%1, %3\n\t" \ 234 "j\t3b\n\t" \ 235 ".previous\n\t" \ 236 ".section\t__ex_table,\"a\"\n\t" \ 237 STR(PTR)"\t1b, 4b\n\t" \ 238 STR(PTR)"\t2b, 4b\n\t" \ 239 ".previous" \ 240 : "=&r" (value), "=r" (res) \ 241 : "r" (addr), "i" (-EFAULT)); 242 #else 243 /* MIPSR6 has not lwl and ldl instructions */ 244 #define LoadWU(addr, value, res) \ 245 __asm__ __volatile__ ( \ 246 ".set\tpush\n\t" \ 247 ".set\tnoat\n\t" \ 248 "1:"user_lbu("%0", "0(%2)")"\n\t" \ 249 "2:"user_lbu("$1", "1(%2)")"\n\t" \ 250 "sll\t%0, 0x8\n\t" \ 251 "or\t%0, $1\n\t" \ 252 "3:"user_lbu("$1", "2(%2)")"\n\t" \ 253 "sll\t%0, 0x8\n\t" \ 254 "or\t%0, $1\n\t" \ 255 "4:"user_lbu("$1", "3(%2)")"\n\t" \ 256 "sll\t%0, 0x8\n\t" \ 257 "or\t%0, $1\n\t" \ 258 "li\t%1, 0\n" \ 259 ".set\tpop\n" \ 260 "10:\n\t" \ 261 ".insn\n\t" \ 262 ".section\t.fixup,\"ax\"\n\t" \ 263 "11:\tli\t%1, %3\n\t" \ 264 "j\t10b\n\t" \ 265 ".previous\n\t" \ 266 
".section\t__ex_table,\"a\"\n\t" \ 267 STR(PTR)"\t1b, 11b\n\t" \ 268 STR(PTR)"\t2b, 11b\n\t" \ 269 STR(PTR)"\t3b, 11b\n\t" \ 270 STR(PTR)"\t4b, 11b\n\t" \ 271 ".previous" \ 272 : "=&r" (value), "=r" (res) \ 273 : "r" (addr), "i" (-EFAULT)); 274 275 #define LoadDW(addr, value, res) \ 276 __asm__ __volatile__ ( \ 277 ".set\tpush\n\t" \ 278 ".set\tnoat\n\t" \ 279 "1:lb\t%0, 0(%2)\n\t" \ 280 "2:lbu\t $1, 1(%2)\n\t" \ 281 "dsll\t%0, 0x8\n\t" \ 282 "or\t%0, $1\n\t" \ 283 "3:lbu\t$1, 2(%2)\n\t" \ 284 "dsll\t%0, 0x8\n\t" \ 285 "or\t%0, $1\n\t" \ 286 "4:lbu\t$1, 3(%2)\n\t" \ 287 "dsll\t%0, 0x8\n\t" \ 288 "or\t%0, $1\n\t" \ 289 "5:lbu\t$1, 4(%2)\n\t" \ 290 "dsll\t%0, 0x8\n\t" \ 291 "or\t%0, $1\n\t" \ 292 "6:lbu\t$1, 5(%2)\n\t" \ 293 "dsll\t%0, 0x8\n\t" \ 294 "or\t%0, $1\n\t" \ 295 "7:lbu\t$1, 6(%2)\n\t" \ 296 "dsll\t%0, 0x8\n\t" \ 297 "or\t%0, $1\n\t" \ 298 "8:lbu\t$1, 7(%2)\n\t" \ 299 "dsll\t%0, 0x8\n\t" \ 300 "or\t%0, $1\n\t" \ 301 "li\t%1, 0\n" \ 302 ".set\tpop\n\t" \ 303 "10:\n\t" \ 304 ".insn\n\t" \ 305 ".section\t.fixup,\"ax\"\n\t" \ 306 "11:\tli\t%1, %3\n\t" \ 307 "j\t10b\n\t" \ 308 ".previous\n\t" \ 309 ".section\t__ex_table,\"a\"\n\t" \ 310 STR(PTR)"\t1b, 11b\n\t" \ 311 STR(PTR)"\t2b, 11b\n\t" \ 312 STR(PTR)"\t3b, 11b\n\t" \ 313 STR(PTR)"\t4b, 11b\n\t" \ 314 STR(PTR)"\t5b, 11b\n\t" \ 315 STR(PTR)"\t6b, 11b\n\t" \ 316 STR(PTR)"\t7b, 11b\n\t" \ 317 STR(PTR)"\t8b, 11b\n\t" \ 318 ".previous" \ 319 : "=&r" (value), "=r" (res) \ 320 : "r" (addr), "i" (-EFAULT)); 321 #endif /* CONFIG_CPU_MIPSR6 */ 322 323 324 #define StoreHW(addr, value, res) \ 325 __asm__ __volatile__ ( \ 326 ".set\tnoat\n" \ 327 "1:\t"user_sb("%1", "1(%2)")"\n" \ 328 "srl\t$1, %1, 0x8\n" \ 329 "2:\t"user_sb("$1", "0(%2)")"\n" \ 330 ".set\tat\n\t" \ 331 "li\t%0, 0\n" \ 332 "3:\n\t" \ 333 ".insn\n\t" \ 334 ".section\t.fixup,\"ax\"\n\t" \ 335 "4:\tli\t%0, %3\n\t" \ 336 "j\t3b\n\t" \ 337 ".previous\n\t" \ 338 ".section\t__ex_table,\"a\"\n\t" \ 339 STR(PTR)"\t1b, 4b\n\t" \ 340 STR(PTR)"\t2b, 4b\n\t" \ 341 
".previous" \ 342 : "=r" (res) \ 343 : "r" (value), "r" (addr), "i" (-EFAULT)); 344 345 #ifndef CONFIG_CPU_MIPSR6 346 #define StoreW(addr, value, res) \ 347 __asm__ __volatile__ ( \ 348 "1:\t"user_swl("%1", "(%2)")"\n" \ 349 "2:\t"user_swr("%1", "3(%2)")"\n\t" \ 350 "li\t%0, 0\n" \ 351 "3:\n\t" \ 352 ".insn\n\t" \ 353 ".section\t.fixup,\"ax\"\n\t" \ 354 "4:\tli\t%0, %3\n\t" \ 355 "j\t3b\n\t" \ 356 ".previous\n\t" \ 357 ".section\t__ex_table,\"a\"\n\t" \ 358 STR(PTR)"\t1b, 4b\n\t" \ 359 STR(PTR)"\t2b, 4b\n\t" \ 360 ".previous" \ 361 : "=r" (res) \ 362 : "r" (value), "r" (addr), "i" (-EFAULT)); 363 364 #define StoreDW(addr, value, res) \ 365 __asm__ __volatile__ ( \ 366 "1:\tsdl\t%1,(%2)\n" \ 367 "2:\tsdr\t%1, 7(%2)\n\t" \ 368 "li\t%0, 0\n" \ 369 "3:\n\t" \ 370 ".insn\n\t" \ 371 ".section\t.fixup,\"ax\"\n\t" \ 372 "4:\tli\t%0, %3\n\t" \ 373 "j\t3b\n\t" \ 374 ".previous\n\t" \ 375 ".section\t__ex_table,\"a\"\n\t" \ 376 STR(PTR)"\t1b, 4b\n\t" \ 377 STR(PTR)"\t2b, 4b\n\t" \ 378 ".previous" \ 379 : "=r" (res) \ 380 : "r" (value), "r" (addr), "i" (-EFAULT)); 381 #else 382 /* MIPSR6 has no swl and sdl instructions */ 383 #define StoreW(addr, value, res) \ 384 __asm__ __volatile__ ( \ 385 ".set\tpush\n\t" \ 386 ".set\tnoat\n\t" \ 387 "1:"user_sb("%1", "3(%2)")"\n\t" \ 388 "srl\t$1, %1, 0x8\n\t" \ 389 "2:"user_sb("$1", "2(%2)")"\n\t" \ 390 "srl\t$1, $1, 0x8\n\t" \ 391 "3:"user_sb("$1", "1(%2)")"\n\t" \ 392 "srl\t$1, $1, 0x8\n\t" \ 393 "4:"user_sb("$1", "0(%2)")"\n\t" \ 394 ".set\tpop\n\t" \ 395 "li\t%0, 0\n" \ 396 "10:\n\t" \ 397 ".insn\n\t" \ 398 ".section\t.fixup,\"ax\"\n\t" \ 399 "11:\tli\t%0, %3\n\t" \ 400 "j\t10b\n\t" \ 401 ".previous\n\t" \ 402 ".section\t__ex_table,\"a\"\n\t" \ 403 STR(PTR)"\t1b, 11b\n\t" \ 404 STR(PTR)"\t2b, 11b\n\t" \ 405 STR(PTR)"\t3b, 11b\n\t" \ 406 STR(PTR)"\t4b, 11b\n\t" \ 407 ".previous" \ 408 : "=&r" (res) \ 409 : "r" (value), "r" (addr), "i" (-EFAULT) \ 410 : "memory"); 411 412 #define StoreDW(addr, value, res) \ 413 __asm__ __volatile__ ( \ 
414 ".set\tpush\n\t" \ 415 ".set\tnoat\n\t" \ 416 "1:sb\t%1, 7(%2)\n\t" \ 417 "dsrl\t$1, %1, 0x8\n\t" \ 418 "2:sb\t$1, 6(%2)\n\t" \ 419 "dsrl\t$1, $1, 0x8\n\t" \ 420 "3:sb\t$1, 5(%2)\n\t" \ 421 "dsrl\t$1, $1, 0x8\n\t" \ 422 "4:sb\t$1, 4(%2)\n\t" \ 423 "dsrl\t$1, $1, 0x8\n\t" \ 424 "5:sb\t$1, 3(%2)\n\t" \ 425 "dsrl\t$1, $1, 0x8\n\t" \ 426 "6:sb\t$1, 2(%2)\n\t" \ 427 "dsrl\t$1, $1, 0x8\n\t" \ 428 "7:sb\t$1, 1(%2)\n\t" \ 429 "dsrl\t$1, $1, 0x8\n\t" \ 430 "8:sb\t$1, 0(%2)\n\t" \ 431 "dsrl\t$1, $1, 0x8\n\t" \ 432 ".set\tpop\n\t" \ 433 "li\t%0, 0\n" \ 434 "10:\n\t" \ 435 ".insn\n\t" \ 436 ".section\t.fixup,\"ax\"\n\t" \ 437 "11:\tli\t%0, %3\n\t" \ 438 "j\t10b\n\t" \ 439 ".previous\n\t" \ 440 ".section\t__ex_table,\"a\"\n\t" \ 441 STR(PTR)"\t1b, 11b\n\t" \ 442 STR(PTR)"\t2b, 11b\n\t" \ 443 STR(PTR)"\t3b, 11b\n\t" \ 444 STR(PTR)"\t4b, 11b\n\t" \ 445 STR(PTR)"\t5b, 11b\n\t" \ 446 STR(PTR)"\t6b, 11b\n\t" \ 447 STR(PTR)"\t7b, 11b\n\t" \ 448 STR(PTR)"\t8b, 11b\n\t" \ 449 ".previous" \ 450 : "=&r" (res) \ 451 : "r" (value), "r" (addr), "i" (-EFAULT) \ 452 : "memory"); 453 #endif /* CONFIG_CPU_MIPSR6 */ 454 455 #else /* __BIG_ENDIAN */ 456 457 #define LoadHW(addr, value, res) \ 458 __asm__ __volatile__ (".set\tnoat\n" \ 459 "1:\t"user_lb("%0", "1(%2)")"\n" \ 460 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ 461 "sll\t%0, 0x8\n\t" \ 462 "or\t%0, $1\n\t" \ 463 "li\t%1, 0\n" \ 464 "3:\t.set\tat\n\t" \ 465 ".insn\n\t" \ 466 ".section\t.fixup,\"ax\"\n\t" \ 467 "4:\tli\t%1, %3\n\t" \ 468 "j\t3b\n\t" \ 469 ".previous\n\t" \ 470 ".section\t__ex_table,\"a\"\n\t" \ 471 STR(PTR)"\t1b, 4b\n\t" \ 472 STR(PTR)"\t2b, 4b\n\t" \ 473 ".previous" \ 474 : "=&r" (value), "=r" (res) \ 475 : "r" (addr), "i" (-EFAULT)); 476 477 #ifndef CONFIG_CPU_MIPSR6 478 #define LoadW(addr, value, res) \ 479 __asm__ __volatile__ ( \ 480 "1:\t"user_lwl("%0", "3(%2)")"\n" \ 481 "2:\t"user_lwr("%0", "(%2)")"\n\t" \ 482 "li\t%1, 0\n" \ 483 "3:\n\t" \ 484 ".insn\n\t" \ 485 ".section\t.fixup,\"ax\"\n\t" \ 486 "4:\tli\t%1, %3\n\t" 
\ 487 "j\t3b\n\t" \ 488 ".previous\n\t" \ 489 ".section\t__ex_table,\"a\"\n\t" \ 490 STR(PTR)"\t1b, 4b\n\t" \ 491 STR(PTR)"\t2b, 4b\n\t" \ 492 ".previous" \ 493 : "=&r" (value), "=r" (res) \ 494 : "r" (addr), "i" (-EFAULT)); 495 #else 496 /* MIPSR6 has no lwl instruction */ 497 #define LoadW(addr, value, res) \ 498 __asm__ __volatile__ ( \ 499 ".set\tpush\n" \ 500 ".set\tnoat\n\t" \ 501 "1:"user_lb("%0", "3(%2)")"\n\t" \ 502 "2:"user_lbu("$1", "2(%2)")"\n\t" \ 503 "sll\t%0, 0x8\n\t" \ 504 "or\t%0, $1\n\t" \ 505 "3:"user_lbu("$1", "1(%2)")"\n\t" \ 506 "sll\t%0, 0x8\n\t" \ 507 "or\t%0, $1\n\t" \ 508 "4:"user_lbu("$1", "0(%2)")"\n\t" \ 509 "sll\t%0, 0x8\n\t" \ 510 "or\t%0, $1\n\t" \ 511 "li\t%1, 0\n" \ 512 ".set\tpop\n" \ 513 "10:\n\t" \ 514 ".insn\n\t" \ 515 ".section\t.fixup,\"ax\"\n\t" \ 516 "11:\tli\t%1, %3\n\t" \ 517 "j\t10b\n\t" \ 518 ".previous\n\t" \ 519 ".section\t__ex_table,\"a\"\n\t" \ 520 STR(PTR)"\t1b, 11b\n\t" \ 521 STR(PTR)"\t2b, 11b\n\t" \ 522 STR(PTR)"\t3b, 11b\n\t" \ 523 STR(PTR)"\t4b, 11b\n\t" \ 524 ".previous" \ 525 : "=&r" (value), "=r" (res) \ 526 : "r" (addr), "i" (-EFAULT)); 527 #endif /* CONFIG_CPU_MIPSR6 */ 528 529 530 #define LoadHWU(addr, value, res) \ 531 __asm__ __volatile__ ( \ 532 ".set\tnoat\n" \ 533 "1:\t"user_lbu("%0", "1(%2)")"\n" \ 534 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ 535 "sll\t%0, 0x8\n\t" \ 536 "or\t%0, $1\n\t" \ 537 "li\t%1, 0\n" \ 538 "3:\n\t" \ 539 ".insn\n\t" \ 540 ".set\tat\n\t" \ 541 ".section\t.fixup,\"ax\"\n\t" \ 542 "4:\tli\t%1, %3\n\t" \ 543 "j\t3b\n\t" \ 544 ".previous\n\t" \ 545 ".section\t__ex_table,\"a\"\n\t" \ 546 STR(PTR)"\t1b, 4b\n\t" \ 547 STR(PTR)"\t2b, 4b\n\t" \ 548 ".previous" \ 549 : "=&r" (value), "=r" (res) \ 550 : "r" (addr), "i" (-EFAULT)); 551 552 #ifndef CONFIG_CPU_MIPSR6 553 #define LoadWU(addr, value, res) \ 554 __asm__ __volatile__ ( \ 555 "1:\t"user_lwl("%0", "3(%2)")"\n" \ 556 "2:\t"user_lwr("%0", "(%2)")"\n\t" \ 557 "dsll\t%0, %0, 32\n\t" \ 558 "dsrl\t%0, %0, 32\n\t" \ 559 "li\t%1, 0\n" \ 
560 "3:\n\t" \ 561 ".insn\n\t" \ 562 "\t.section\t.fixup,\"ax\"\n\t" \ 563 "4:\tli\t%1, %3\n\t" \ 564 "j\t3b\n\t" \ 565 ".previous\n\t" \ 566 ".section\t__ex_table,\"a\"\n\t" \ 567 STR(PTR)"\t1b, 4b\n\t" \ 568 STR(PTR)"\t2b, 4b\n\t" \ 569 ".previous" \ 570 : "=&r" (value), "=r" (res) \ 571 : "r" (addr), "i" (-EFAULT)); 572 573 #define LoadDW(addr, value, res) \ 574 __asm__ __volatile__ ( \ 575 "1:\tldl\t%0, 7(%2)\n" \ 576 "2:\tldr\t%0, (%2)\n\t" \ 577 "li\t%1, 0\n" \ 578 "3:\n\t" \ 579 ".insn\n\t" \ 580 "\t.section\t.fixup,\"ax\"\n\t" \ 581 "4:\tli\t%1, %3\n\t" \ 582 "j\t3b\n\t" \ 583 ".previous\n\t" \ 584 ".section\t__ex_table,\"a\"\n\t" \ 585 STR(PTR)"\t1b, 4b\n\t" \ 586 STR(PTR)"\t2b, 4b\n\t" \ 587 ".previous" \ 588 : "=&r" (value), "=r" (res) \ 589 : "r" (addr), "i" (-EFAULT)); 590 #else 591 /* MIPSR6 has not lwl and ldl instructions */ 592 #define LoadWU(addr, value, res) \ 593 __asm__ __volatile__ ( \ 594 ".set\tpush\n\t" \ 595 ".set\tnoat\n\t" \ 596 "1:"user_lbu("%0", "3(%2)")"\n\t" \ 597 "2:"user_lbu("$1", "2(%2)")"\n\t" \ 598 "sll\t%0, 0x8\n\t" \ 599 "or\t%0, $1\n\t" \ 600 "3:"user_lbu("$1", "1(%2)")"\n\t" \ 601 "sll\t%0, 0x8\n\t" \ 602 "or\t%0, $1\n\t" \ 603 "4:"user_lbu("$1", "0(%2)")"\n\t" \ 604 "sll\t%0, 0x8\n\t" \ 605 "or\t%0, $1\n\t" \ 606 "li\t%1, 0\n" \ 607 ".set\tpop\n" \ 608 "10:\n\t" \ 609 ".insn\n\t" \ 610 ".section\t.fixup,\"ax\"\n\t" \ 611 "11:\tli\t%1, %3\n\t" \ 612 "j\t10b\n\t" \ 613 ".previous\n\t" \ 614 ".section\t__ex_table,\"a\"\n\t" \ 615 STR(PTR)"\t1b, 11b\n\t" \ 616 STR(PTR)"\t2b, 11b\n\t" \ 617 STR(PTR)"\t3b, 11b\n\t" \ 618 STR(PTR)"\t4b, 11b\n\t" \ 619 ".previous" \ 620 : "=&r" (value), "=r" (res) \ 621 : "r" (addr), "i" (-EFAULT)); 622 623 #define LoadDW(addr, value, res) \ 624 __asm__ __volatile__ ( \ 625 ".set\tpush\n\t" \ 626 ".set\tnoat\n\t" \ 627 "1:lb\t%0, 7(%2)\n\t" \ 628 "2:lbu\t$1, 6(%2)\n\t" \ 629 "dsll\t%0, 0x8\n\t" \ 630 "or\t%0, $1\n\t" \ 631 "3:lbu\t$1, 5(%2)\n\t" \ 632 "dsll\t%0, 0x8\n\t" \ 633 "or\t%0, $1\n\t" \ 
634 "4:lbu\t$1, 4(%2)\n\t" \ 635 "dsll\t%0, 0x8\n\t" \ 636 "or\t%0, $1\n\t" \ 637 "5:lbu\t$1, 3(%2)\n\t" \ 638 "dsll\t%0, 0x8\n\t" \ 639 "or\t%0, $1\n\t" \ 640 "6:lbu\t$1, 2(%2)\n\t" \ 641 "dsll\t%0, 0x8\n\t" \ 642 "or\t%0, $1\n\t" \ 643 "7:lbu\t$1, 1(%2)\n\t" \ 644 "dsll\t%0, 0x8\n\t" \ 645 "or\t%0, $1\n\t" \ 646 "8:lbu\t$1, 0(%2)\n\t" \ 647 "dsll\t%0, 0x8\n\t" \ 648 "or\t%0, $1\n\t" \ 649 "li\t%1, 0\n" \ 650 ".set\tpop\n\t" \ 651 "10:\n\t" \ 652 ".insn\n\t" \ 653 ".section\t.fixup,\"ax\"\n\t" \ 654 "11:\tli\t%1, %3\n\t" \ 655 "j\t10b\n\t" \ 656 ".previous\n\t" \ 657 ".section\t__ex_table,\"a\"\n\t" \ 658 STR(PTR)"\t1b, 11b\n\t" \ 659 STR(PTR)"\t2b, 11b\n\t" \ 660 STR(PTR)"\t3b, 11b\n\t" \ 661 STR(PTR)"\t4b, 11b\n\t" \ 662 STR(PTR)"\t5b, 11b\n\t" \ 663 STR(PTR)"\t6b, 11b\n\t" \ 664 STR(PTR)"\t7b, 11b\n\t" \ 665 STR(PTR)"\t8b, 11b\n\t" \ 666 ".previous" \ 667 : "=&r" (value), "=r" (res) \ 668 : "r" (addr), "i" (-EFAULT)); 669 #endif /* CONFIG_CPU_MIPSR6 */ 670 671 #define StoreHW(addr, value, res) \ 672 __asm__ __volatile__ ( \ 673 ".set\tnoat\n" \ 674 "1:\t"user_sb("%1", "0(%2)")"\n" \ 675 "srl\t$1,%1, 0x8\n" \ 676 "2:\t"user_sb("$1", "1(%2)")"\n" \ 677 ".set\tat\n\t" \ 678 "li\t%0, 0\n" \ 679 "3:\n\t" \ 680 ".insn\n\t" \ 681 ".section\t.fixup,\"ax\"\n\t" \ 682 "4:\tli\t%0, %3\n\t" \ 683 "j\t3b\n\t" \ 684 ".previous\n\t" \ 685 ".section\t__ex_table,\"a\"\n\t" \ 686 STR(PTR)"\t1b, 4b\n\t" \ 687 STR(PTR)"\t2b, 4b\n\t" \ 688 ".previous" \ 689 : "=r" (res) \ 690 : "r" (value), "r" (addr), "i" (-EFAULT)); 691 #ifndef CONFIG_CPU_MIPSR6 692 #define StoreW(addr, value, res) \ 693 __asm__ __volatile__ ( \ 694 "1:\t"user_swl("%1", "3(%2)")"\n" \ 695 "2:\t"user_swr("%1", "(%2)")"\n\t" \ 696 "li\t%0, 0\n" \ 697 "3:\n\t" \ 698 ".insn\n\t" \ 699 ".section\t.fixup,\"ax\"\n\t" \ 700 "4:\tli\t%0, %3\n\t" \ 701 "j\t3b\n\t" \ 702 ".previous\n\t" \ 703 ".section\t__ex_table,\"a\"\n\t" \ 704 STR(PTR)"\t1b, 4b\n\t" \ 705 STR(PTR)"\t2b, 4b\n\t" \ 706 ".previous" \ 707 : "=r" (res) \ 708 
: "r" (value), "r" (addr), "i" (-EFAULT)); 709 710 #define StoreDW(addr, value, res) \ 711 __asm__ __volatile__ ( \ 712 "1:\tsdl\t%1, 7(%2)\n" \ 713 "2:\tsdr\t%1, (%2)\n\t" \ 714 "li\t%0, 0\n" \ 715 "3:\n\t" \ 716 ".insn\n\t" \ 717 ".section\t.fixup,\"ax\"\n\t" \ 718 "4:\tli\t%0, %3\n\t" \ 719 "j\t3b\n\t" \ 720 ".previous\n\t" \ 721 ".section\t__ex_table,\"a\"\n\t" \ 722 STR(PTR)"\t1b, 4b\n\t" \ 723 STR(PTR)"\t2b, 4b\n\t" \ 724 ".previous" \ 725 : "=r" (res) \ 726 : "r" (value), "r" (addr), "i" (-EFAULT)); 727 #else 728 /* MIPSR6 has no swl and sdl instructions */ 729 #define StoreW(addr, value, res) \ 730 __asm__ __volatile__ ( \ 731 ".set\tpush\n\t" \ 732 ".set\tnoat\n\t" \ 733 "1:"user_sb("%1", "0(%2)")"\n\t" \ 734 "srl\t$1, %1, 0x8\n\t" \ 735 "2:"user_sb("$1", "1(%2)")"\n\t" \ 736 "srl\t$1, $1, 0x8\n\t" \ 737 "3:"user_sb("$1", "2(%2)")"\n\t" \ 738 "srl\t$1, $1, 0x8\n\t" \ 739 "4:"user_sb("$1", "3(%2)")"\n\t" \ 740 ".set\tpop\n\t" \ 741 "li\t%0, 0\n" \ 742 "10:\n\t" \ 743 ".insn\n\t" \ 744 ".section\t.fixup,\"ax\"\n\t" \ 745 "11:\tli\t%0, %3\n\t" \ 746 "j\t10b\n\t" \ 747 ".previous\n\t" \ 748 ".section\t__ex_table,\"a\"\n\t" \ 749 STR(PTR)"\t1b, 11b\n\t" \ 750 STR(PTR)"\t2b, 11b\n\t" \ 751 STR(PTR)"\t3b, 11b\n\t" \ 752 STR(PTR)"\t4b, 11b\n\t" \ 753 ".previous" \ 754 : "=&r" (res) \ 755 : "r" (value), "r" (addr), "i" (-EFAULT) \ 756 : "memory"); 757 758 #define StoreDW(addr, value, res) \ 759 __asm__ __volatile__ ( \ 760 ".set\tpush\n\t" \ 761 ".set\tnoat\n\t" \ 762 "1:sb\t%1, 0(%2)\n\t" \ 763 "dsrl\t$1, %1, 0x8\n\t" \ 764 "2:sb\t$1, 1(%2)\n\t" \ 765 "dsrl\t$1, $1, 0x8\n\t" \ 766 "3:sb\t$1, 2(%2)\n\t" \ 767 "dsrl\t$1, $1, 0x8\n\t" \ 768 "4:sb\t$1, 3(%2)\n\t" \ 769 "dsrl\t$1, $1, 0x8\n\t" \ 770 "5:sb\t$1, 4(%2)\n\t" \ 771 "dsrl\t$1, $1, 0x8\n\t" \ 772 "6:sb\t$1, 5(%2)\n\t" \ 773 "dsrl\t$1, $1, 0x8\n\t" \ 774 "7:sb\t$1, 6(%2)\n\t" \ 775 "dsrl\t$1, $1, 0x8\n\t" \ 776 "8:sb\t$1, 7(%2)\n\t" \ 777 "dsrl\t$1, $1, 0x8\n\t" \ 778 ".set\tpop\n\t" \ 779 "li\t%0, 0\n" \ 780 
"10:\n\t"						\
	".insn\n\t"						\
	".section\t.fixup,\"ax\"\n\t"				\
	"11:\tli\t%0, %3\n\t"					\
	"j\t10b\n\t"						\
	".previous\n\t"						\
	".section\t__ex_table,\"a\"\n\t"			\
	STR(PTR)"\t1b, 11b\n\t"					\
	STR(PTR)"\t2b, 11b\n\t"					\
	STR(PTR)"\t3b, 11b\n\t"					\
	STR(PTR)"\t4b, 11b\n\t"					\
	STR(PTR)"\t5b, 11b\n\t"					\
	STR(PTR)"\t6b, 11b\n\t"					\
	STR(PTR)"\t7b, 11b\n\t"					\
	STR(PTR)"\t8b, 11b\n\t"					\
	".previous"						\
	: "=&r" (res)						\
	: "r" (value), "r" (addr), "i" (-EFAULT)		\
	: "memory");
#endif /* CONFIG_CPU_MIPSR6 */
#endif

/*
 * emulate_load_store_insn - emulate one classic-MIPS load/store that took
 * an address error exception.
 * @regs: exception-time register file; GPRs and cp0_epc are updated on
 *        successful emulation.
 * @addr: the unaligned data address that faulted.
 * @pc:   user/kernel address of the faulting instruction word.
 *
 * Decodes the instruction at @pc and performs the access byte-wise via the
 * Load*/Store* macros above.  On a fault during emulation the EPC and $31
 * saved below are rolled back before signalling, so a branch-delay-slot
 * instruction does not leave half-updated branch state behind.  Delivers
 * SIGBUS for non-emulatable accesses and SIGILL for unknown opcodes.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;
	unsigned long origpc;
	unsigned long orig31;
	void __user *fault_addr = NULL;
#ifdef CONFIG_EVA
	mm_segment_t seg;
#endif
	/* Saved so the fault path can undo compute_return_epc()'s effects. */
	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of
	 * interest.
	 */
#ifdef CONFIG_EVA
	case spec3_op:
		/*
		 * we can land here only from kernel accessing user memory,
		 * so we need to "switch" the address limit to user space, so
		 * address check can work properly.
		 */
		seg = get_fs();
		set_fs(USER_DS);
		switch (insn.spec3_format.func) {
		case lhe_op:
			if (!access_ok(VERIFY_READ, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadHW(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lwe_op:
			if (!access_ok(VERIFY_READ, addr, 4)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadW(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lhue_op:
			if (!access_ok(VERIFY_READ, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			LoadHWU(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case she_op:
			if (!access_ok(VERIFY_WRITE, addr, 2)) {
				set_fs(seg);
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreHW(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			break;
		case swe_op:
			if (!access_ok(VERIFY_WRITE, addr, 4)) {
				set_fs(seg);
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreW(addr, value, res);
			if (res) {
				set_fs(seg);
				goto fault;
			}
			break;
		default:
			set_fs(seg);
			goto sigill;
		}
		/* Restore the original address limit on every exit path. */
		set_fs(seg);
		break;
#endif
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/* Unaligned FP accesses are handed to the FPU emulator. */
		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		lose_fpu(1);	/* Save FPU state for the emulator. */
		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		return;

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}

/* Recode table from 16-bit register notation to 32-bit GPR. */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* Recode table from 16-bit STORE register notation to 32-bit GPR.
*/ 1150 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; 1151 1152 static void emulate_load_store_microMIPS(struct pt_regs *regs, 1153 void __user *addr) 1154 { 1155 unsigned long value; 1156 unsigned int res; 1157 int i; 1158 unsigned int reg = 0, rvar; 1159 unsigned long orig31; 1160 u16 __user *pc16; 1161 u16 halfword; 1162 unsigned int word; 1163 unsigned long origpc, contpc; 1164 union mips_instruction insn; 1165 struct mm_decoded_insn mminsn; 1166 void __user *fault_addr = NULL; 1167 1168 origpc = regs->cp0_epc; 1169 orig31 = regs->regs[31]; 1170 1171 mminsn.micro_mips_mode = 1; 1172 1173 /* 1174 * This load never faults. 1175 */ 1176 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); 1177 __get_user(halfword, pc16); 1178 pc16++; 1179 contpc = regs->cp0_epc + 2; 1180 word = ((unsigned int)halfword << 16); 1181 mminsn.pc_inc = 2; 1182 1183 if (!mm_insn_16bit(halfword)) { 1184 __get_user(halfword, pc16); 1185 pc16++; 1186 contpc = regs->cp0_epc + 4; 1187 mminsn.pc_inc = 4; 1188 word |= halfword; 1189 } 1190 mminsn.insn = word; 1191 1192 if (get_user(halfword, pc16)) 1193 goto fault; 1194 mminsn.next_pc_inc = 2; 1195 word = ((unsigned int)halfword << 16); 1196 1197 if (!mm_insn_16bit(halfword)) { 1198 pc16++; 1199 if (get_user(halfword, pc16)) 1200 goto fault; 1201 mminsn.next_pc_inc = 4; 1202 word |= halfword; 1203 } 1204 mminsn.next_insn = word; 1205 1206 insn = (union mips_instruction)(mminsn.insn); 1207 if (mm_isBranchInstr(regs, mminsn, &contpc)) 1208 insn = (union mips_instruction)(mminsn.next_insn); 1209 1210 /* Parse instruction to find what to do */ 1211 1212 switch (insn.mm_i_format.opcode) { 1213 1214 case mm_pool32a_op: 1215 switch (insn.mm_x_format.func) { 1216 case mm_lwxs_op: 1217 reg = insn.mm_x_format.rd; 1218 goto loadW; 1219 } 1220 1221 goto sigbus; 1222 1223 case mm_pool32b_op: 1224 switch (insn.mm_m_format.func) { 1225 case mm_lwp_func: 1226 reg = insn.mm_m_format.rd; 1227 if (reg == 31) 1228 goto sigbus; 1229 1230 if 
(!access_ok(VERIFY_READ, addr, 8)) 1231 goto sigbus; 1232 1233 LoadW(addr, value, res); 1234 if (res) 1235 goto fault; 1236 regs->regs[reg] = value; 1237 addr += 4; 1238 LoadW(addr, value, res); 1239 if (res) 1240 goto fault; 1241 regs->regs[reg + 1] = value; 1242 goto success; 1243 1244 case mm_swp_func: 1245 reg = insn.mm_m_format.rd; 1246 if (reg == 31) 1247 goto sigbus; 1248 1249 if (!access_ok(VERIFY_WRITE, addr, 8)) 1250 goto sigbus; 1251 1252 value = regs->regs[reg]; 1253 StoreW(addr, value, res); 1254 if (res) 1255 goto fault; 1256 addr += 4; 1257 value = regs->regs[reg + 1]; 1258 StoreW(addr, value, res); 1259 if (res) 1260 goto fault; 1261 goto success; 1262 1263 case mm_ldp_func: 1264 #ifdef CONFIG_64BIT 1265 reg = insn.mm_m_format.rd; 1266 if (reg == 31) 1267 goto sigbus; 1268 1269 if (!access_ok(VERIFY_READ, addr, 16)) 1270 goto sigbus; 1271 1272 LoadDW(addr, value, res); 1273 if (res) 1274 goto fault; 1275 regs->regs[reg] = value; 1276 addr += 8; 1277 LoadDW(addr, value, res); 1278 if (res) 1279 goto fault; 1280 regs->regs[reg + 1] = value; 1281 goto success; 1282 #endif /* CONFIG_64BIT */ 1283 1284 goto sigill; 1285 1286 case mm_sdp_func: 1287 #ifdef CONFIG_64BIT 1288 reg = insn.mm_m_format.rd; 1289 if (reg == 31) 1290 goto sigbus; 1291 1292 if (!access_ok(VERIFY_WRITE, addr, 16)) 1293 goto sigbus; 1294 1295 value = regs->regs[reg]; 1296 StoreDW(addr, value, res); 1297 if (res) 1298 goto fault; 1299 addr += 8; 1300 value = regs->regs[reg + 1]; 1301 StoreDW(addr, value, res); 1302 if (res) 1303 goto fault; 1304 goto success; 1305 #endif /* CONFIG_64BIT */ 1306 1307 goto sigill; 1308 1309 case mm_lwm32_func: 1310 reg = insn.mm_m_format.rd; 1311 rvar = reg & 0xf; 1312 if ((rvar > 9) || !reg) 1313 goto sigill; 1314 if (reg & 0x10) { 1315 if (!access_ok 1316 (VERIFY_READ, addr, 4 * (rvar + 1))) 1317 goto sigbus; 1318 } else { 1319 if (!access_ok(VERIFY_READ, addr, 4 * rvar)) 1320 goto sigbus; 1321 } 1322 if (rvar == 9) 1323 rvar = 8; 1324 for (i = 16; 
rvar; rvar--, i++) { 1325 LoadW(addr, value, res); 1326 if (res) 1327 goto fault; 1328 addr += 4; 1329 regs->regs[i] = value; 1330 } 1331 if ((reg & 0xf) == 9) { 1332 LoadW(addr, value, res); 1333 if (res) 1334 goto fault; 1335 addr += 4; 1336 regs->regs[30] = value; 1337 } 1338 if (reg & 0x10) { 1339 LoadW(addr, value, res); 1340 if (res) 1341 goto fault; 1342 regs->regs[31] = value; 1343 } 1344 goto success; 1345 1346 case mm_swm32_func: 1347 reg = insn.mm_m_format.rd; 1348 rvar = reg & 0xf; 1349 if ((rvar > 9) || !reg) 1350 goto sigill; 1351 if (reg & 0x10) { 1352 if (!access_ok 1353 (VERIFY_WRITE, addr, 4 * (rvar + 1))) 1354 goto sigbus; 1355 } else { 1356 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) 1357 goto sigbus; 1358 } 1359 if (rvar == 9) 1360 rvar = 8; 1361 for (i = 16; rvar; rvar--, i++) { 1362 value = regs->regs[i]; 1363 StoreW(addr, value, res); 1364 if (res) 1365 goto fault; 1366 addr += 4; 1367 } 1368 if ((reg & 0xf) == 9) { 1369 value = regs->regs[30]; 1370 StoreW(addr, value, res); 1371 if (res) 1372 goto fault; 1373 addr += 4; 1374 } 1375 if (reg & 0x10) { 1376 value = regs->regs[31]; 1377 StoreW(addr, value, res); 1378 if (res) 1379 goto fault; 1380 } 1381 goto success; 1382 1383 case mm_ldm_func: 1384 #ifdef CONFIG_64BIT 1385 reg = insn.mm_m_format.rd; 1386 rvar = reg & 0xf; 1387 if ((rvar > 9) || !reg) 1388 goto sigill; 1389 if (reg & 0x10) { 1390 if (!access_ok 1391 (VERIFY_READ, addr, 8 * (rvar + 1))) 1392 goto sigbus; 1393 } else { 1394 if (!access_ok(VERIFY_READ, addr, 8 * rvar)) 1395 goto sigbus; 1396 } 1397 if (rvar == 9) 1398 rvar = 8; 1399 1400 for (i = 16; rvar; rvar--, i++) { 1401 LoadDW(addr, value, res); 1402 if (res) 1403 goto fault; 1404 addr += 4; 1405 regs->regs[i] = value; 1406 } 1407 if ((reg & 0xf) == 9) { 1408 LoadDW(addr, value, res); 1409 if (res) 1410 goto fault; 1411 addr += 8; 1412 regs->regs[30] = value; 1413 } 1414 if (reg & 0x10) { 1415 LoadDW(addr, value, res); 1416 if (res) 1417 goto fault; 1418 regs->regs[31] = 
value; 1419 } 1420 goto success; 1421 #endif /* CONFIG_64BIT */ 1422 1423 goto sigill; 1424 1425 case mm_sdm_func: 1426 #ifdef CONFIG_64BIT 1427 reg = insn.mm_m_format.rd; 1428 rvar = reg & 0xf; 1429 if ((rvar > 9) || !reg) 1430 goto sigill; 1431 if (reg & 0x10) { 1432 if (!access_ok 1433 (VERIFY_WRITE, addr, 8 * (rvar + 1))) 1434 goto sigbus; 1435 } else { 1436 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar)) 1437 goto sigbus; 1438 } 1439 if (rvar == 9) 1440 rvar = 8; 1441 1442 for (i = 16; rvar; rvar--, i++) { 1443 value = regs->regs[i]; 1444 StoreDW(addr, value, res); 1445 if (res) 1446 goto fault; 1447 addr += 8; 1448 } 1449 if ((reg & 0xf) == 9) { 1450 value = regs->regs[30]; 1451 StoreDW(addr, value, res); 1452 if (res) 1453 goto fault; 1454 addr += 8; 1455 } 1456 if (reg & 0x10) { 1457 value = regs->regs[31]; 1458 StoreDW(addr, value, res); 1459 if (res) 1460 goto fault; 1461 } 1462 goto success; 1463 #endif /* CONFIG_64BIT */ 1464 1465 goto sigill; 1466 1467 /* LWC2, SWC2, LDC2, SDC2 are not serviced */ 1468 } 1469 1470 goto sigbus; 1471 1472 case mm_pool32c_op: 1473 switch (insn.mm_m_format.func) { 1474 case mm_lwu_func: 1475 reg = insn.mm_m_format.rd; 1476 goto loadWU; 1477 } 1478 1479 /* LL,SC,LLD,SCD are not serviced */ 1480 goto sigbus; 1481 1482 case mm_pool32f_op: 1483 switch (insn.mm_x_format.func) { 1484 case mm_lwxc1_func: 1485 case mm_swxc1_func: 1486 case mm_ldxc1_func: 1487 case mm_sdxc1_func: 1488 goto fpu_emul; 1489 } 1490 1491 goto sigbus; 1492 1493 case mm_ldc132_op: 1494 case mm_sdc132_op: 1495 case mm_lwc132_op: 1496 case mm_swc132_op: 1497 fpu_emul: 1498 /* roll back jump/branch */ 1499 regs->cp0_epc = origpc; 1500 regs->regs[31] = orig31; 1501 1502 die_if_kernel("Unaligned FP access in kernel code", regs); 1503 BUG_ON(!used_math()); 1504 BUG_ON(!is_fpu_owner()); 1505 1506 lose_fpu(1); /* save the FPU state for the emulator */ 1507 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 1508 &fault_addr); 1509 own_fpu(1); /* restore FPU 
state */ 1510 1511 /* If something went wrong, signal */ 1512 process_fpemu_return(res, fault_addr, 0); 1513 1514 if (res == 0) 1515 goto success; 1516 return; 1517 1518 case mm_lh32_op: 1519 reg = insn.mm_i_format.rt; 1520 goto loadHW; 1521 1522 case mm_lhu32_op: 1523 reg = insn.mm_i_format.rt; 1524 goto loadHWU; 1525 1526 case mm_lw32_op: 1527 reg = insn.mm_i_format.rt; 1528 goto loadW; 1529 1530 case mm_sh32_op: 1531 reg = insn.mm_i_format.rt; 1532 goto storeHW; 1533 1534 case mm_sw32_op: 1535 reg = insn.mm_i_format.rt; 1536 goto storeW; 1537 1538 case mm_ld32_op: 1539 reg = insn.mm_i_format.rt; 1540 goto loadDW; 1541 1542 case mm_sd32_op: 1543 reg = insn.mm_i_format.rt; 1544 goto storeDW; 1545 1546 case mm_pool16c_op: 1547 switch (insn.mm16_m_format.func) { 1548 case mm_lwm16_op: 1549 reg = insn.mm16_m_format.rlist; 1550 rvar = reg + 1; 1551 if (!access_ok(VERIFY_READ, addr, 4 * rvar)) 1552 goto sigbus; 1553 1554 for (i = 16; rvar; rvar--, i++) { 1555 LoadW(addr, value, res); 1556 if (res) 1557 goto fault; 1558 addr += 4; 1559 regs->regs[i] = value; 1560 } 1561 LoadW(addr, value, res); 1562 if (res) 1563 goto fault; 1564 regs->regs[31] = value; 1565 1566 goto success; 1567 1568 case mm_swm16_op: 1569 reg = insn.mm16_m_format.rlist; 1570 rvar = reg + 1; 1571 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) 1572 goto sigbus; 1573 1574 for (i = 16; rvar; rvar--, i++) { 1575 value = regs->regs[i]; 1576 StoreW(addr, value, res); 1577 if (res) 1578 goto fault; 1579 addr += 4; 1580 } 1581 value = regs->regs[31]; 1582 StoreW(addr, value, res); 1583 if (res) 1584 goto fault; 1585 1586 goto success; 1587 1588 } 1589 1590 goto sigbus; 1591 1592 case mm_lhu16_op: 1593 reg = reg16to32[insn.mm16_rb_format.rt]; 1594 goto loadHWU; 1595 1596 case mm_lw16_op: 1597 reg = reg16to32[insn.mm16_rb_format.rt]; 1598 goto loadW; 1599 1600 case mm_sh16_op: 1601 reg = reg16to32st[insn.mm16_rb_format.rt]; 1602 goto storeHW; 1603 1604 case mm_sw16_op: 1605 reg = 
reg16to32st[insn.mm16_rb_format.rt]; 1606 goto storeW; 1607 1608 case mm_lwsp16_op: 1609 reg = insn.mm16_r5_format.rt; 1610 goto loadW; 1611 1612 case mm_swsp16_op: 1613 reg = insn.mm16_r5_format.rt; 1614 goto storeW; 1615 1616 case mm_lwgp16_op: 1617 reg = reg16to32[insn.mm16_r3_format.rt]; 1618 goto loadW; 1619 1620 default: 1621 goto sigill; 1622 } 1623 1624 loadHW: 1625 if (!access_ok(VERIFY_READ, addr, 2)) 1626 goto sigbus; 1627 1628 LoadHW(addr, value, res); 1629 if (res) 1630 goto fault; 1631 regs->regs[reg] = value; 1632 goto success; 1633 1634 loadHWU: 1635 if (!access_ok(VERIFY_READ, addr, 2)) 1636 goto sigbus; 1637 1638 LoadHWU(addr, value, res); 1639 if (res) 1640 goto fault; 1641 regs->regs[reg] = value; 1642 goto success; 1643 1644 loadW: 1645 if (!access_ok(VERIFY_READ, addr, 4)) 1646 goto sigbus; 1647 1648 LoadW(addr, value, res); 1649 if (res) 1650 goto fault; 1651 regs->regs[reg] = value; 1652 goto success; 1653 1654 loadWU: 1655 #ifdef CONFIG_64BIT 1656 /* 1657 * A 32-bit kernel might be running on a 64-bit processor. But 1658 * if we're on a 32-bit processor and an i-cache incoherency 1659 * or race makes us see a 64-bit instruction here the sdl/sdr 1660 * would blow up, so for now we don't handle unaligned 64-bit 1661 * instructions on 32-bit kernels. 1662 */ 1663 if (!access_ok(VERIFY_READ, addr, 4)) 1664 goto sigbus; 1665 1666 LoadWU(addr, value, res); 1667 if (res) 1668 goto fault; 1669 regs->regs[reg] = value; 1670 goto success; 1671 #endif /* CONFIG_64BIT */ 1672 1673 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1674 goto sigill; 1675 1676 loadDW: 1677 #ifdef CONFIG_64BIT 1678 /* 1679 * A 32-bit kernel might be running on a 64-bit processor. But 1680 * if we're on a 32-bit processor and an i-cache incoherency 1681 * or race makes us see a 64-bit instruction here the sdl/sdr 1682 * would blow up, so for now we don't handle unaligned 64-bit 1683 * instructions on 32-bit kernels. 
1684 */ 1685 if (!access_ok(VERIFY_READ, addr, 8)) 1686 goto sigbus; 1687 1688 LoadDW(addr, value, res); 1689 if (res) 1690 goto fault; 1691 regs->regs[reg] = value; 1692 goto success; 1693 #endif /* CONFIG_64BIT */ 1694 1695 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1696 goto sigill; 1697 1698 storeHW: 1699 if (!access_ok(VERIFY_WRITE, addr, 2)) 1700 goto sigbus; 1701 1702 value = regs->regs[reg]; 1703 StoreHW(addr, value, res); 1704 if (res) 1705 goto fault; 1706 goto success; 1707 1708 storeW: 1709 if (!access_ok(VERIFY_WRITE, addr, 4)) 1710 goto sigbus; 1711 1712 value = regs->regs[reg]; 1713 StoreW(addr, value, res); 1714 if (res) 1715 goto fault; 1716 goto success; 1717 1718 storeDW: 1719 #ifdef CONFIG_64BIT 1720 /* 1721 * A 32-bit kernel might be running on a 64-bit processor. But 1722 * if we're on a 32-bit processor and an i-cache incoherency 1723 * or race makes us see a 64-bit instruction here the sdl/sdr 1724 * would blow up, so for now we don't handle unaligned 64-bit 1725 * instructions on 32-bit kernels. 1726 */ 1727 if (!access_ok(VERIFY_WRITE, addr, 8)) 1728 goto sigbus; 1729 1730 value = regs->regs[reg]; 1731 StoreDW(addr, value, res); 1732 if (res) 1733 goto fault; 1734 goto success; 1735 #endif /* CONFIG_64BIT */ 1736 1737 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1738 goto sigill; 1739 1740 success: 1741 regs->cp0_epc = contpc; /* advance or branch */ 1742 1743 #ifdef CONFIG_DEBUG_FS 1744 unaligned_instructions++; 1745 #endif 1746 return; 1747 1748 fault: 1749 /* roll back jump/branch */ 1750 regs->cp0_epc = origpc; 1751 regs->regs[31] = orig31; 1752 /* Did we have an exception handler installed? 
*/ 1753 if (fixup_exception(regs)) 1754 return; 1755 1756 die_if_kernel("Unhandled kernel unaligned access", regs); 1757 force_sig(SIGSEGV, current); 1758 1759 return; 1760 1761 sigbus: 1762 die_if_kernel("Unhandled kernel unaligned access", regs); 1763 force_sig(SIGBUS, current); 1764 1765 return; 1766 1767 sigill: 1768 die_if_kernel 1769 ("Unhandled kernel unaligned access or invalid instruction", regs); 1770 force_sig(SIGILL, current); 1771 } 1772 1773 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) 1774 { 1775 unsigned long value; 1776 unsigned int res; 1777 int reg; 1778 unsigned long orig31; 1779 u16 __user *pc16; 1780 unsigned long origpc; 1781 union mips16e_instruction mips16inst, oldinst; 1782 1783 origpc = regs->cp0_epc; 1784 orig31 = regs->regs[31]; 1785 pc16 = (unsigned short __user *)msk_isa16_mode(origpc); 1786 /* 1787 * This load never faults. 1788 */ 1789 __get_user(mips16inst.full, pc16); 1790 oldinst = mips16inst; 1791 1792 /* skip EXTEND instruction */ 1793 if (mips16inst.ri.opcode == MIPS16e_extend_op) { 1794 pc16++; 1795 __get_user(mips16inst.full, pc16); 1796 } else if (delay_slot(regs)) { 1797 /* skip jump instructions */ 1798 /* JAL/JALX are 32 bits but have OPCODE in first short int */ 1799 if (mips16inst.ri.opcode == MIPS16e_jal_op) 1800 pc16++; 1801 pc16++; 1802 if (get_user(mips16inst.full, pc16)) 1803 goto sigbus; 1804 } 1805 1806 switch (mips16inst.ri.opcode) { 1807 case MIPS16e_i64_op: /* I64 or RI64 instruction */ 1808 switch (mips16inst.i64.func) { /* I64/RI64 func field check */ 1809 case MIPS16e_ldpc_func: 1810 case MIPS16e_ldsp_func: 1811 reg = reg16to32[mips16inst.ri64.ry]; 1812 goto loadDW; 1813 1814 case MIPS16e_sdsp_func: 1815 reg = reg16to32[mips16inst.ri64.ry]; 1816 goto writeDW; 1817 1818 case MIPS16e_sdrasp_func: 1819 reg = 29; /* GPRSP */ 1820 goto writeDW; 1821 } 1822 1823 goto sigbus; 1824 1825 case MIPS16e_swsp_op: 1826 case MIPS16e_lwpc_op: 1827 case MIPS16e_lwsp_op: 1828 reg = 
reg16to32[mips16inst.ri.rx]; 1829 break; 1830 1831 case MIPS16e_i8_op: 1832 if (mips16inst.i8.func != MIPS16e_swrasp_func) 1833 goto sigbus; 1834 reg = 29; /* GPRSP */ 1835 break; 1836 1837 default: 1838 reg = reg16to32[mips16inst.rri.ry]; 1839 break; 1840 } 1841 1842 switch (mips16inst.ri.opcode) { 1843 1844 case MIPS16e_lb_op: 1845 case MIPS16e_lbu_op: 1846 case MIPS16e_sb_op: 1847 goto sigbus; 1848 1849 case MIPS16e_lh_op: 1850 if (!access_ok(VERIFY_READ, addr, 2)) 1851 goto sigbus; 1852 1853 LoadHW(addr, value, res); 1854 if (res) 1855 goto fault; 1856 MIPS16e_compute_return_epc(regs, &oldinst); 1857 regs->regs[reg] = value; 1858 break; 1859 1860 case MIPS16e_lhu_op: 1861 if (!access_ok(VERIFY_READ, addr, 2)) 1862 goto sigbus; 1863 1864 LoadHWU(addr, value, res); 1865 if (res) 1866 goto fault; 1867 MIPS16e_compute_return_epc(regs, &oldinst); 1868 regs->regs[reg] = value; 1869 break; 1870 1871 case MIPS16e_lw_op: 1872 case MIPS16e_lwpc_op: 1873 case MIPS16e_lwsp_op: 1874 if (!access_ok(VERIFY_READ, addr, 4)) 1875 goto sigbus; 1876 1877 LoadW(addr, value, res); 1878 if (res) 1879 goto fault; 1880 MIPS16e_compute_return_epc(regs, &oldinst); 1881 regs->regs[reg] = value; 1882 break; 1883 1884 case MIPS16e_lwu_op: 1885 #ifdef CONFIG_64BIT 1886 /* 1887 * A 32-bit kernel might be running on a 64-bit processor. But 1888 * if we're on a 32-bit processor and an i-cache incoherency 1889 * or race makes us see a 64-bit instruction here the sdl/sdr 1890 * would blow up, so for now we don't handle unaligned 64-bit 1891 * instructions on 32-bit kernels. 
1892 */ 1893 if (!access_ok(VERIFY_READ, addr, 4)) 1894 goto sigbus; 1895 1896 LoadWU(addr, value, res); 1897 if (res) 1898 goto fault; 1899 MIPS16e_compute_return_epc(regs, &oldinst); 1900 regs->regs[reg] = value; 1901 break; 1902 #endif /* CONFIG_64BIT */ 1903 1904 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1905 goto sigill; 1906 1907 case MIPS16e_ld_op: 1908 loadDW: 1909 #ifdef CONFIG_64BIT 1910 /* 1911 * A 32-bit kernel might be running on a 64-bit processor. But 1912 * if we're on a 32-bit processor and an i-cache incoherency 1913 * or race makes us see a 64-bit instruction here the sdl/sdr 1914 * would blow up, so for now we don't handle unaligned 64-bit 1915 * instructions on 32-bit kernels. 1916 */ 1917 if (!access_ok(VERIFY_READ, addr, 8)) 1918 goto sigbus; 1919 1920 LoadDW(addr, value, res); 1921 if (res) 1922 goto fault; 1923 MIPS16e_compute_return_epc(regs, &oldinst); 1924 regs->regs[reg] = value; 1925 break; 1926 #endif /* CONFIG_64BIT */ 1927 1928 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1929 goto sigill; 1930 1931 case MIPS16e_sh_op: 1932 if (!access_ok(VERIFY_WRITE, addr, 2)) 1933 goto sigbus; 1934 1935 MIPS16e_compute_return_epc(regs, &oldinst); 1936 value = regs->regs[reg]; 1937 StoreHW(addr, value, res); 1938 if (res) 1939 goto fault; 1940 break; 1941 1942 case MIPS16e_sw_op: 1943 case MIPS16e_swsp_op: 1944 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ 1945 if (!access_ok(VERIFY_WRITE, addr, 4)) 1946 goto sigbus; 1947 1948 MIPS16e_compute_return_epc(regs, &oldinst); 1949 value = regs->regs[reg]; 1950 StoreW(addr, value, res); 1951 if (res) 1952 goto fault; 1953 break; 1954 1955 case MIPS16e_sd_op: 1956 writeDW: 1957 #ifdef CONFIG_64BIT 1958 /* 1959 * A 32-bit kernel might be running on a 64-bit processor. 
But 1960 * if we're on a 32-bit processor and an i-cache incoherency 1961 * or race makes us see a 64-bit instruction here the sdl/sdr 1962 * would blow up, so for now we don't handle unaligned 64-bit 1963 * instructions on 32-bit kernels. 1964 */ 1965 if (!access_ok(VERIFY_WRITE, addr, 8)) 1966 goto sigbus; 1967 1968 MIPS16e_compute_return_epc(regs, &oldinst); 1969 value = regs->regs[reg]; 1970 StoreDW(addr, value, res); 1971 if (res) 1972 goto fault; 1973 break; 1974 #endif /* CONFIG_64BIT */ 1975 1976 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1977 goto sigill; 1978 1979 default: 1980 /* 1981 * Pheeee... We encountered an yet unknown instruction or 1982 * cache coherence problem. Die sucker, die ... 1983 */ 1984 goto sigill; 1985 } 1986 1987 #ifdef CONFIG_DEBUG_FS 1988 unaligned_instructions++; 1989 #endif 1990 1991 return; 1992 1993 fault: 1994 /* roll back jump/branch */ 1995 regs->cp0_epc = origpc; 1996 regs->regs[31] = orig31; 1997 /* Did we have an exception handler installed? */ 1998 if (fixup_exception(regs)) 1999 return; 2000 2001 die_if_kernel("Unhandled kernel unaligned access", regs); 2002 force_sig(SIGSEGV, current); 2003 2004 return; 2005 2006 sigbus: 2007 die_if_kernel("Unhandled kernel unaligned access", regs); 2008 force_sig(SIGBUS, current); 2009 2010 return; 2011 2012 sigill: 2013 die_if_kernel 2014 ("Unhandled kernel unaligned access or invalid instruction", regs); 2015 force_sig(SIGILL, current); 2016 } 2017 2018 asmlinkage void do_ade(struct pt_regs *regs) 2019 { 2020 enum ctx_state prev_state; 2021 unsigned int __user *pc; 2022 mm_segment_t seg; 2023 2024 prev_state = exception_enter(); 2025 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 2026 1, regs, regs->cp0_badvaddr); 2027 /* 2028 * Did we catch a fault trying to load an instruction? 
2029 */ 2030 if (regs->cp0_badvaddr == regs->cp0_epc) 2031 goto sigbus; 2032 2033 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) 2034 goto sigbus; 2035 if (unaligned_action == UNALIGNED_ACTION_SIGNAL) 2036 goto sigbus; 2037 2038 /* 2039 * Do branch emulation only if we didn't forward the exception. 2040 * This is all so but ugly ... 2041 */ 2042 2043 /* 2044 * Are we running in microMIPS mode? 2045 */ 2046 if (get_isa16_mode(regs->cp0_epc)) { 2047 /* 2048 * Did we catch a fault trying to load an instruction in 2049 * 16-bit mode? 2050 */ 2051 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) 2052 goto sigbus; 2053 if (unaligned_action == UNALIGNED_ACTION_SHOW) 2054 show_registers(regs); 2055 2056 if (cpu_has_mmips) { 2057 seg = get_fs(); 2058 if (!user_mode(regs)) 2059 set_fs(KERNEL_DS); 2060 emulate_load_store_microMIPS(regs, 2061 (void __user *)regs->cp0_badvaddr); 2062 set_fs(seg); 2063 2064 return; 2065 } 2066 2067 if (cpu_has_mips16) { 2068 seg = get_fs(); 2069 if (!user_mode(regs)) 2070 set_fs(KERNEL_DS); 2071 emulate_load_store_MIPS16e(regs, 2072 (void __user *)regs->cp0_badvaddr); 2073 set_fs(seg); 2074 2075 return; 2076 } 2077 2078 goto sigbus; 2079 } 2080 2081 if (unaligned_action == UNALIGNED_ACTION_SHOW) 2082 show_registers(regs); 2083 pc = (unsigned int __user *)exception_epc(regs); 2084 2085 seg = get_fs(); 2086 if (!user_mode(regs)) 2087 set_fs(KERNEL_DS); 2088 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); 2089 set_fs(seg); 2090 2091 return; 2092 2093 sigbus: 2094 die_if_kernel("Kernel unaligned instruction access", regs); 2095 force_sig(SIGBUS, current); 2096 2097 /* 2098 * XXX On return from the signal handler we should advance the epc 2099 */ 2100 exception_exit(prev_state); 2101 } 2102 2103 #ifdef CONFIG_DEBUG_FS 2104 extern struct dentry *mips_debugfs_dir; 2105 static int __init debugfs_unaligned(void) 2106 { 2107 struct dentry *d; 2108 2109 if (!mips_debugfs_dir) 2110 return -ENODEV; 2111 d = 
debugfs_create_u32("unaligned_instructions", S_IRUGO, 2112 mips_debugfs_dir, &unaligned_instructions); 2113 if (!d) 2114 return -ENOMEM; 2115 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, 2116 mips_debugfs_dir, &unaligned_action); 2117 if (!d) 2118 return -ENOMEM; 2119 return 0; 2120 } 2121 __initcall(debugfs_unaligned); 2122 #endif 2123