/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 *      MIPS R2 user space instruction emulator for MIPS R6
 *
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/local.h>
#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

/*
 * Select doubleword or word forms of the assembler mnemonics used by the
 * inline-asm load/store emulation below, depending on the register width.
 */
#ifdef CONFIG_64BIT
#define ADDIU "daddiu "
#define INS   "dins "
#define EXT   "dext "
#else
#define ADDIU "addiu "
#define INS   "ins "
#define EXT   "ext "
#endif /* CONFIG_64BIT */

#define SB "sb "
#define LB "lb "
#define LL "ll "
#define SC "sc "

/* Per-CPU statistics (exposed via debugfs) for emulated instructions */
DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);

extern const unsigned int fpucondbit[8];

/* Upper bound on repeated emulation passes for a single trap */
#define MIPS_R2_EMUL_TOTAL_PASS 10

/* Non-zero once R2 emulation is enabled via the "mipsr2emu" kernel option */
int mipsr2_emulation = 0;

/* Early-boot handler for the "mipsr2emu" command line option */
static int __init mipsr2emu_enable(char *s)
{
	mipsr2_emulation = 1;

	pr_info("MIPS R2-to-R6 Emulator Enabled!");

	return 1;
}
__setup("mipsr2emu", mipsr2emu_enable);

/**
 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot
 * for performance instead of the traditional way of using a stack trampoline
 * which is rather slow.
 * @regs: Process register set
 * @ir: Instruction
 */
static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
{
	switch (MIPSInst_OPCODE(ir)) {
	case addiu_op:
		/* Writes targeting $zero are architecturally ignored */
		if (MIPSInst_RT(ir))
			regs->regs[MIPSInst_RT(ir)] =
				(s32)regs->regs[MIPSInst_RS(ir)] +
				(s32)MIPSInst_SIMM(ir);
		return 0;
	case daddiu_op:
		if (IS_ENABLED(CONFIG_32BIT))
			break;

		if (MIPSInst_RT(ir))
			regs->regs[MIPSInst_RT(ir)] =
				(s64)regs->regs[MIPSInst_RS(ir)] +
				(s64)MIPSInst_SIMM(ir);
		return 0;
	case lwc1_op:
	case swc1_op:
	case cop1_op:
	case cop1x_op:
		/* FPU instructions in delay slot */
		return -SIGFPE;
	case spec_op:
		switch (MIPSInst_FUNC(ir)) {
		case or_op:
			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					regs->regs[MIPSInst_RS(ir)] |
					regs->regs[MIPSInst_RT(ir)];
			return 0;
		case sll_op:
			/* A non-zero rs field encodes a different instruction */
			if (MIPSInst_RS(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
						MIPSInst_FD(ir));
			return 0;
		case srl_op:
			if (MIPSInst_RS(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
						MIPSInst_FD(ir));
			return 0;
		case addu_op:
			/* A non-zero sa field encodes a different instruction */
			if (MIPSInst_FD(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s32)((u32)regs->regs[MIPSInst_RS(ir)] +
					      (u32)regs->regs[MIPSInst_RT(ir)]);
			return 0;
		case subu_op:
			if (MIPSInst_FD(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s32)((u32)regs->regs[MIPSInst_RS(ir)] -
					      (u32)regs->regs[MIPSInst_RT(ir)]);
			return 0;
		case dsll_op:
			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
						MIPSInst_FD(ir));
			return 0;
		case dsrl_op:
			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
						MIPSInst_FD(ir));
			return 0;
		case daddu_op:
			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(u64)regs->regs[MIPSInst_RS(ir)] +
					(u64)regs->regs[MIPSInst_RT(ir)];
			return 0;
		case dsubu_op:
			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s64)((u64)regs->regs[MIPSInst_RS(ir)] -
					      (u64)regs->regs[MIPSInst_RT(ir)]);
			return 0;
		}
		break;
	default:
		pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
			 ir, MIPSInst_OPCODE(ir));
	}

	return SIGILL;
}

/**
 * movf_func - Emulate a MOVF instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int movf_func(struct pt_regs *regs, u32 ir)
{
	u32 csr;
	u32 cond;

	csr = current->thread.fpu.fcr31;
	cond = fpucondbit[MIPSInst_RT(ir) >> 2];

	/* Move only when the selected FP condition bit is clear */
	if (((csr & cond) == 0) && MIPSInst_RD(ir))
		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];

	MIPS_R2_STATS(movs);

	return 0;
}

/**
 * movt_func - Emulate a MOVT instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int movt_func(struct pt_regs *regs, u32 ir)
{
	u32 csr;
	u32 cond;

	csr = current->thread.fpu.fcr31;
	cond = fpucondbit[MIPSInst_RT(ir) >> 2];

	/* Move only when the selected FP condition bit is set */
	if (((csr & cond) != 0) && MIPSInst_RD(ir))
		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];

	MIPS_R2_STATS(movs);

	return 0;
}

/**
 * jr_func - Emulate a JR instruction.
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns SIGILL if JR was in delay slot, SIGEMT if we
 * can't compute the EPC, SIGSEGV if we can't access the
 * userland instruction or 0 on success.
 */
static int jr_func(struct pt_regs *regs, u32 ir)
{
	int err;
	unsigned long cepc, epc, nepc;
	u32 nir;

	if (delay_slot(regs))
		return SIGILL;

	/* EPC after the RI/JR instruction */
	nepc = regs->cp0_epc;
	/* Roll back to the reserved R2 JR instruction */
	regs->cp0_epc -= 4;
	epc = regs->cp0_epc;
	err = __compute_return_epc(regs);

	if (err < 0)
		return SIGEMT;


	/* Computed EPC */
	cepc = regs->cp0_epc;

	/* Get DS instruction */
	err = __get_user(nir, (u32 __user *)nepc);
	if (err)
		return SIGSEGV;

	MIPS_R2BR_STATS(jrs);

	/* If nir == 0(NOP), then nothing else to do */
	if (nir) {
		/*
		 * Negative err means FPU instruction in BD-slot,
		 * Zero err means 'BD-slot emulation done'
		 * For anything else we go back to trampoline emulation.
		 */
		err = mipsr6_emul(regs, nir);
		if (err > 0) {
			regs->cp0_epc = nepc;
			err = mips_dsemul(regs, nir, epc, cepc);
			if (err == SIGILL)
				err = SIGEMT;
			MIPS_R2_STATS(dsemul);
		}
	}

	return err;
}

/**
 * movz_func - Emulate a MOVZ instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int movz_func(struct pt_regs *regs, u32 ir)
{
	/* Move only when rt is zero; writes targeting $zero are dropped */
	if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
	MIPS_R2_STATS(movs);

	return 0;
}

/**
 * movn_func - Emulate a MOVN instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int movn_func(struct pt_regs *regs, u32 ir)
{
	/* Move only when rt is non-zero; writes targeting $zero are dropped */
	if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
	MIPS_R2_STATS(movs);

	return 0;
}

/**
 * mfhi_func - Emulate a MFHI instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mfhi_func(struct pt_regs *regs, u32 ir)
{
	if (MIPSInst_RD(ir))
		regs->regs[MIPSInst_RD(ir)] = regs->hi;

	MIPS_R2_STATS(hilo);

	return 0;
}

/**
 * mthi_func - Emulate a MTHI instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mthi_func(struct pt_regs *regs, u32 ir)
{
	regs->hi = regs->regs[MIPSInst_RS(ir)];

	MIPS_R2_STATS(hilo);

	return 0;
}

/**
 * mflo_func - Emulate a MFLO instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mflo_func(struct pt_regs *regs, u32 ir)
{
	if (MIPSInst_RD(ir))
		regs->regs[MIPSInst_RD(ir)] = regs->lo;

	MIPS_R2_STATS(hilo);

	return 0;
}

/**
 * mtlo_func - Emulate a MTLO instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mtlo_func(struct pt_regs *regs, u32 ir)
{
	regs->lo = regs->regs[MIPSInst_RS(ir)];

	MIPS_R2_STATS(hilo);

	return 0;
}

/**
 * mult_func - Emulate a MULT instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
400 */ 401 static int mult_func(struct pt_regs *regs, u32 ir) 402 { 403 s64 res; 404 s32 rt, rs; 405 406 rt = regs->regs[MIPSInst_RT(ir)]; 407 rs = regs->regs[MIPSInst_RS(ir)]; 408 res = (s64)rt * (s64)rs; 409 410 rs = res; 411 regs->lo = (s64)rs; 412 rt = res >> 32; 413 res = (s64)rt; 414 regs->hi = res; 415 416 MIPS_R2_STATS(muls); 417 418 return 0; 419 } 420 421 /** 422 * multu_func - Emulate a MULTU instruction 423 * @regs: Process register set 424 * @ir: Instruction 425 * 426 * Returns 0 since it always succeeds. 427 */ 428 static int multu_func(struct pt_regs *regs, u32 ir) 429 { 430 u64 res; 431 u32 rt, rs; 432 433 rt = regs->regs[MIPSInst_RT(ir)]; 434 rs = regs->regs[MIPSInst_RS(ir)]; 435 res = (u64)rt * (u64)rs; 436 rt = res; 437 regs->lo = (s64)rt; 438 regs->hi = (s64)(res >> 32); 439 440 MIPS_R2_STATS(muls); 441 442 return 0; 443 } 444 445 /** 446 * div_func - Emulate a DIV instruction 447 * @regs: Process register set 448 * @ir: Instruction 449 * 450 * Returns 0 since it always succeeds. 451 */ 452 static int div_func(struct pt_regs *regs, u32 ir) 453 { 454 s32 rt, rs; 455 456 rt = regs->regs[MIPSInst_RT(ir)]; 457 rs = regs->regs[MIPSInst_RS(ir)]; 458 459 regs->lo = (s64)(rs / rt); 460 regs->hi = (s64)(rs % rt); 461 462 MIPS_R2_STATS(divs); 463 464 return 0; 465 } 466 467 /** 468 * divu_func - Emulate a DIVU instruction 469 * @regs: Process register set 470 * @ir: Instruction 471 * 472 * Returns 0 since it always succeeds. 473 */ 474 static int divu_func(struct pt_regs *regs, u32 ir) 475 { 476 u32 rt, rs; 477 478 rt = regs->regs[MIPSInst_RT(ir)]; 479 rs = regs->regs[MIPSInst_RS(ir)]; 480 481 regs->lo = (s64)(rs / rt); 482 regs->hi = (s64)(rs % rt); 483 484 MIPS_R2_STATS(divs); 485 486 return 0; 487 } 488 489 /** 490 * dmult_func - Emulate a DMULT instruction 491 * @regs: Process register set 492 * @ir: Instruction 493 * 494 * Returns 0 on success or SIGILL for 32-bit kernels. 
495 */ 496 static int dmult_func(struct pt_regs *regs, u32 ir) 497 { 498 s64 res; 499 s64 rt, rs; 500 501 if (IS_ENABLED(CONFIG_32BIT)) 502 return SIGILL; 503 504 rt = regs->regs[MIPSInst_RT(ir)]; 505 rs = regs->regs[MIPSInst_RS(ir)]; 506 res = rt * rs; 507 508 regs->lo = res; 509 __asm__ __volatile__( 510 "dmuh %0, %1, %2\t\n" 511 : "=r"(res) 512 : "r"(rt), "r"(rs)); 513 514 regs->hi = res; 515 516 MIPS_R2_STATS(muls); 517 518 return 0; 519 } 520 521 /** 522 * dmultu_func - Emulate a DMULTU instruction 523 * @regs: Process register set 524 * @ir: Instruction 525 * 526 * Returns 0 on success or SIGILL for 32-bit kernels. 527 */ 528 static int dmultu_func(struct pt_regs *regs, u32 ir) 529 { 530 u64 res; 531 u64 rt, rs; 532 533 if (IS_ENABLED(CONFIG_32BIT)) 534 return SIGILL; 535 536 rt = regs->regs[MIPSInst_RT(ir)]; 537 rs = regs->regs[MIPSInst_RS(ir)]; 538 res = rt * rs; 539 540 regs->lo = res; 541 __asm__ __volatile__( 542 "dmuhu %0, %1, %2\t\n" 543 : "=r"(res) 544 : "r"(rt), "r"(rs)); 545 546 regs->hi = res; 547 548 MIPS_R2_STATS(muls); 549 550 return 0; 551 } 552 553 /** 554 * ddiv_func - Emulate a DDIV instruction 555 * @regs: Process register set 556 * @ir: Instruction 557 * 558 * Returns 0 on success or SIGILL for 32-bit kernels. 559 */ 560 static int ddiv_func(struct pt_regs *regs, u32 ir) 561 { 562 s64 rt, rs; 563 564 if (IS_ENABLED(CONFIG_32BIT)) 565 return SIGILL; 566 567 rt = regs->regs[MIPSInst_RT(ir)]; 568 rs = regs->regs[MIPSInst_RS(ir)]; 569 570 regs->lo = rs / rt; 571 regs->hi = rs % rt; 572 573 MIPS_R2_STATS(divs); 574 575 return 0; 576 } 577 578 /** 579 * ddivu_func - Emulate a DDIVU instruction 580 * @regs: Process register set 581 * @ir: Instruction 582 * 583 * Returns 0 on success or SIGILL for 32-bit kernels. 
584 */ 585 static int ddivu_func(struct pt_regs *regs, u32 ir) 586 { 587 u64 rt, rs; 588 589 if (IS_ENABLED(CONFIG_32BIT)) 590 return SIGILL; 591 592 rt = regs->regs[MIPSInst_RT(ir)]; 593 rs = regs->regs[MIPSInst_RS(ir)]; 594 595 regs->lo = rs / rt; 596 regs->hi = rs % rt; 597 598 MIPS_R2_STATS(divs); 599 600 return 0; 601 } 602 603 /* R6 removed instructions for the SPECIAL opcode */ 604 static struct r2_decoder_table spec_op_table[] = { 605 { 0xfc1ff83f, 0x00000008, jr_func }, 606 { 0xfc00ffff, 0x00000018, mult_func }, 607 { 0xfc00ffff, 0x00000019, multu_func }, 608 { 0xfc00ffff, 0x0000001c, dmult_func }, 609 { 0xfc00ffff, 0x0000001d, dmultu_func }, 610 { 0xffff07ff, 0x00000010, mfhi_func }, 611 { 0xfc1fffff, 0x00000011, mthi_func }, 612 { 0xffff07ff, 0x00000012, mflo_func }, 613 { 0xfc1fffff, 0x00000013, mtlo_func }, 614 { 0xfc0307ff, 0x00000001, movf_func }, 615 { 0xfc0307ff, 0x00010001, movt_func }, 616 { 0xfc0007ff, 0x0000000a, movz_func }, 617 { 0xfc0007ff, 0x0000000b, movn_func }, 618 { 0xfc00ffff, 0x0000001a, div_func }, 619 { 0xfc00ffff, 0x0000001b, divu_func }, 620 { 0xfc00ffff, 0x0000001e, ddiv_func }, 621 { 0xfc00ffff, 0x0000001f, ddivu_func }, 622 {} 623 }; 624 625 /** 626 * madd_func - Emulate a MADD instruction 627 * @regs: Process register set 628 * @ir: Instruction 629 * 630 * Returns 0 since it always succeeds. 631 */ 632 static int madd_func(struct pt_regs *regs, u32 ir) 633 { 634 s64 res; 635 s32 rt, rs; 636 637 rt = regs->regs[MIPSInst_RT(ir)]; 638 rs = regs->regs[MIPSInst_RS(ir)]; 639 res = (s64)rt * (s64)rs; 640 rt = regs->hi; 641 rs = regs->lo; 642 res += ((((s64)rt) << 32) | (u32)rs); 643 644 rt = res; 645 regs->lo = (s64)rt; 646 rs = res >> 32; 647 regs->hi = (s64)rs; 648 649 MIPS_R2_STATS(dsps); 650 651 return 0; 652 } 653 654 /** 655 * maddu_func - Emulate a MADDU instruction 656 * @regs: Process register set 657 * @ir: Instruction 658 * 659 * Returns 0 since it always succeeds. 
660 */ 661 static int maddu_func(struct pt_regs *regs, u32 ir) 662 { 663 u64 res; 664 u32 rt, rs; 665 666 rt = regs->regs[MIPSInst_RT(ir)]; 667 rs = regs->regs[MIPSInst_RS(ir)]; 668 res = (u64)rt * (u64)rs; 669 rt = regs->hi; 670 rs = regs->lo; 671 res += ((((s64)rt) << 32) | (u32)rs); 672 673 rt = res; 674 regs->lo = (s64)rt; 675 rs = res >> 32; 676 regs->hi = (s64)rs; 677 678 MIPS_R2_STATS(dsps); 679 680 return 0; 681 } 682 683 /** 684 * msub_func - Emulate a MSUB instruction 685 * @regs: Process register set 686 * @ir: Instruction 687 * 688 * Returns 0 since it always succeeds. 689 */ 690 static int msub_func(struct pt_regs *regs, u32 ir) 691 { 692 s64 res; 693 s32 rt, rs; 694 695 rt = regs->regs[MIPSInst_RT(ir)]; 696 rs = regs->regs[MIPSInst_RS(ir)]; 697 res = (s64)rt * (s64)rs; 698 rt = regs->hi; 699 rs = regs->lo; 700 res = ((((s64)rt) << 32) | (u32)rs) - res; 701 702 rt = res; 703 regs->lo = (s64)rt; 704 rs = res >> 32; 705 regs->hi = (s64)rs; 706 707 MIPS_R2_STATS(dsps); 708 709 return 0; 710 } 711 712 /** 713 * msubu_func - Emulate a MSUBU instruction 714 * @regs: Process register set 715 * @ir: Instruction 716 * 717 * Returns 0 since it always succeeds. 718 */ 719 static int msubu_func(struct pt_regs *regs, u32 ir) 720 { 721 u64 res; 722 u32 rt, rs; 723 724 rt = regs->regs[MIPSInst_RT(ir)]; 725 rs = regs->regs[MIPSInst_RS(ir)]; 726 res = (u64)rt * (u64)rs; 727 rt = regs->hi; 728 rs = regs->lo; 729 res = ((((s64)rt) << 32) | (u32)rs) - res; 730 731 rt = res; 732 regs->lo = (s64)rt; 733 rs = res >> 32; 734 regs->hi = (s64)rs; 735 736 MIPS_R2_STATS(dsps); 737 738 return 0; 739 } 740 741 /** 742 * mul_func - Emulate a MUL instruction 743 * @regs: Process register set 744 * @ir: Instruction 745 * 746 * Returns 0 since it always succeeds. 
 */
static int mul_func(struct pt_regs *regs, u32 ir)
{
	s64 res;
	s32 rt, rs;

	/* MUL targeting $zero is a no-op */
	if (!MIPSInst_RD(ir))
		return 0;
	rt = regs->regs[MIPSInst_RT(ir)];
	rs = regs->regs[MIPSInst_RS(ir)];
	res = (s64)rt * (s64)rs;

	/* Only the sign-extended low 32 bits of the product are written */
	rs = res;
	regs->regs[MIPSInst_RD(ir)] = (s64)rs;

	MIPS_R2_STATS(muls);

	return 0;
}

/**
 * clz_func - Emulate a CLZ instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int clz_func(struct pt_regs *regs, u32 ir)
{
	u32 res;
	u32 rs;

	if (!MIPSInst_RD(ir))
		return 0;

	rs = regs->regs[MIPSInst_RS(ir)];
	/* CLZ exists on R6 too; use the native instruction directly */
	__asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
	regs->regs[MIPSInst_RD(ir)] = res;

	MIPS_R2_STATS(bops);

	return 0;
}

/**
 * clo_func - Emulate a CLO instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */

static int clo_func(struct pt_regs *regs, u32 ir)
{
	u32 res;
	u32 rs;

	if (!MIPSInst_RD(ir))
		return 0;

	rs = regs->regs[MIPSInst_RS(ir)];
	/* CLO exists on R6 too; use the native instruction directly */
	__asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
	regs->regs[MIPSInst_RD(ir)] = res;

	MIPS_R2_STATS(bops);

	return 0;
}

/**
 * dclz_func - Emulate a DCLZ instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int dclz_func(struct pt_regs *regs, u32 ir)
{
	u64 res;
	u64 rs;

	if (IS_ENABLED(CONFIG_32BIT))
		return SIGILL;

	if (!MIPSInst_RD(ir))
		return 0;

	rs = regs->regs[MIPSInst_RS(ir)];
	__asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
	regs->regs[MIPSInst_RD(ir)] = res;

	MIPS_R2_STATS(bops);

	return 0;
}

/**
 * dclo_func - Emulate a DCLO instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int dclo_func(struct pt_regs *regs, u32 ir)
{
	u64 res;
	u64 rs;

	if (IS_ENABLED(CONFIG_32BIT))
		return SIGILL;

	if (!MIPSInst_RD(ir))
		return 0;

	rs = regs->regs[MIPSInst_RS(ir)];
	__asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
	regs->regs[MIPSInst_RD(ir)] = res;

	MIPS_R2_STATS(bops);

	return 0;
}

/* R6 removed instructions for the SPECIAL2 opcode */
static struct r2_decoder_table spec2_op_table[] = {
	{ 0xfc00ffff, 0x70000000, madd_func },
	{ 0xfc00ffff, 0x70000001, maddu_func },
	{ 0xfc0007ff, 0x70000002, mul_func },
	{ 0xfc00ffff, 0x70000004, msub_func },
	{ 0xfc00ffff, 0x70000005, msubu_func },
	{ 0xfc0007ff, 0x70000020, clz_func },
	{ 0xfc0007ff, 0x70000021, clo_func },
	{ 0xfc0007ff, 0x70000024, dclz_func },
	{ 0xfc0007ff, 0x70000025, dclo_func },
	{ }
};

/*
 * Scan @table for an entry whose masked encoding matches @inst and run
 * its emulation handler.  Returns the handler's result, or SIGILL when
 * no entry matches.
 */
static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
				      struct r2_decoder_table *table)
{
	struct r2_decoder_table *p;
	int err;

	/* Tables are terminated by an entry with a NULL handler */
	for (p = table; p->func; p++) {
		if ((inst & p->mask) == p->code) {
			err = (p->func)(regs, inst);
			return err;
		}
	}
	return SIGILL;
}

/**
 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
 * @regs: Process register set
 * @inst: Instruction to decode and emulate
 * @fcr31: Floating Point Control and Status
Register returned 904 */ 905 int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) 906 { 907 int err = 0; 908 unsigned long vaddr; 909 u32 nir; 910 unsigned long cpc, epc, nepc, r31, res, rs, rt; 911 912 void __user *fault_addr = NULL; 913 int pass = 0; 914 915 repeat: 916 r31 = regs->regs[31]; 917 epc = regs->cp0_epc; 918 err = compute_return_epc(regs); 919 if (err < 0) { 920 BUG(); 921 return SIGEMT; 922 } 923 pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n", 924 inst, epc, pass); 925 926 switch (MIPSInst_OPCODE(inst)) { 927 case spec_op: 928 err = mipsr2_find_op_func(regs, inst, spec_op_table); 929 if (err < 0) { 930 /* FPU instruction under JR */ 931 regs->cp0_cause |= CAUSEF_BD; 932 goto fpu_emul; 933 } 934 break; 935 case spec2_op: 936 err = mipsr2_find_op_func(regs, inst, spec2_op_table); 937 break; 938 case bcond_op: 939 rt = MIPSInst_RT(inst); 940 rs = MIPSInst_RS(inst); 941 switch (rt) { 942 case tgei_op: 943 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) 944 do_trap_or_bp(regs, 0, 0, "TGEI"); 945 946 MIPS_R2_STATS(traps); 947 948 break; 949 case tgeiu_op: 950 if (regs->regs[rs] >= MIPSInst_UIMM(inst)) 951 do_trap_or_bp(regs, 0, 0, "TGEIU"); 952 953 MIPS_R2_STATS(traps); 954 955 break; 956 case tlti_op: 957 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) 958 do_trap_or_bp(regs, 0, 0, "TLTI"); 959 960 MIPS_R2_STATS(traps); 961 962 break; 963 case tltiu_op: 964 if (regs->regs[rs] < MIPSInst_UIMM(inst)) 965 do_trap_or_bp(regs, 0, 0, "TLTIU"); 966 967 MIPS_R2_STATS(traps); 968 969 break; 970 case teqi_op: 971 if (regs->regs[rs] == MIPSInst_SIMM(inst)) 972 do_trap_or_bp(regs, 0, 0, "TEQI"); 973 974 MIPS_R2_STATS(traps); 975 976 break; 977 case tnei_op: 978 if (regs->regs[rs] != MIPSInst_SIMM(inst)) 979 do_trap_or_bp(regs, 0, 0, "TNEI"); 980 981 MIPS_R2_STATS(traps); 982 983 break; 984 case bltzl_op: 985 case bgezl_op: 986 case bltzall_op: 987 case bgezall_op: 988 if (delay_slot(regs)) { 989 err = SIGILL; 990 break; 991 
} 992 regs->regs[31] = r31; 993 regs->cp0_epc = epc; 994 err = __compute_return_epc(regs); 995 if (err < 0) 996 return SIGEMT; 997 if (err != BRANCH_LIKELY_TAKEN) 998 break; 999 cpc = regs->cp0_epc; 1000 nepc = epc + 4; 1001 err = __get_user(nir, (u32 __user *)nepc); 1002 if (err) { 1003 err = SIGSEGV; 1004 break; 1005 } 1006 /* 1007 * This will probably be optimized away when 1008 * CONFIG_DEBUG_FS is not enabled 1009 */ 1010 switch (rt) { 1011 case bltzl_op: 1012 MIPS_R2BR_STATS(bltzl); 1013 break; 1014 case bgezl_op: 1015 MIPS_R2BR_STATS(bgezl); 1016 break; 1017 case bltzall_op: 1018 MIPS_R2BR_STATS(bltzall); 1019 break; 1020 case bgezall_op: 1021 MIPS_R2BR_STATS(bgezall); 1022 break; 1023 } 1024 1025 switch (MIPSInst_OPCODE(nir)) { 1026 case cop1_op: 1027 case cop1x_op: 1028 case lwc1_op: 1029 case swc1_op: 1030 regs->cp0_cause |= CAUSEF_BD; 1031 goto fpu_emul; 1032 } 1033 if (nir) { 1034 err = mipsr6_emul(regs, nir); 1035 if (err > 0) { 1036 err = mips_dsemul(regs, nir, epc, cpc); 1037 if (err == SIGILL) 1038 err = SIGEMT; 1039 MIPS_R2_STATS(dsemul); 1040 } 1041 } 1042 break; 1043 case bltzal_op: 1044 case bgezal_op: 1045 if (delay_slot(regs)) { 1046 err = SIGILL; 1047 break; 1048 } 1049 regs->regs[31] = r31; 1050 regs->cp0_epc = epc; 1051 err = __compute_return_epc(regs); 1052 if (err < 0) 1053 return SIGEMT; 1054 cpc = regs->cp0_epc; 1055 nepc = epc + 4; 1056 err = __get_user(nir, (u32 __user *)nepc); 1057 if (err) { 1058 err = SIGSEGV; 1059 break; 1060 } 1061 /* 1062 * This will probably be optimized away when 1063 * CONFIG_DEBUG_FS is not enabled 1064 */ 1065 switch (rt) { 1066 case bltzal_op: 1067 MIPS_R2BR_STATS(bltzal); 1068 break; 1069 case bgezal_op: 1070 MIPS_R2BR_STATS(bgezal); 1071 break; 1072 } 1073 1074 switch (MIPSInst_OPCODE(nir)) { 1075 case cop1_op: 1076 case cop1x_op: 1077 case lwc1_op: 1078 case swc1_op: 1079 regs->cp0_cause |= CAUSEF_BD; 1080 goto fpu_emul; 1081 } 1082 if (nir) { 1083 err = mipsr6_emul(regs, nir); 1084 if (err > 0) { 1085 
err = mips_dsemul(regs, nir, epc, cpc); 1086 if (err == SIGILL) 1087 err = SIGEMT; 1088 MIPS_R2_STATS(dsemul); 1089 } 1090 } 1091 break; 1092 default: 1093 regs->regs[31] = r31; 1094 regs->cp0_epc = epc; 1095 err = SIGILL; 1096 break; 1097 } 1098 break; 1099 1100 case beql_op: 1101 case bnel_op: 1102 case blezl_op: 1103 case bgtzl_op: 1104 if (delay_slot(regs)) { 1105 err = SIGILL; 1106 break; 1107 } 1108 regs->regs[31] = r31; 1109 regs->cp0_epc = epc; 1110 err = __compute_return_epc(regs); 1111 if (err < 0) 1112 return SIGEMT; 1113 if (err != BRANCH_LIKELY_TAKEN) 1114 break; 1115 cpc = regs->cp0_epc; 1116 nepc = epc + 4; 1117 err = __get_user(nir, (u32 __user *)nepc); 1118 if (err) { 1119 err = SIGSEGV; 1120 break; 1121 } 1122 /* 1123 * This will probably be optimized away when 1124 * CONFIG_DEBUG_FS is not enabled 1125 */ 1126 switch (MIPSInst_OPCODE(inst)) { 1127 case beql_op: 1128 MIPS_R2BR_STATS(beql); 1129 break; 1130 case bnel_op: 1131 MIPS_R2BR_STATS(bnel); 1132 break; 1133 case blezl_op: 1134 MIPS_R2BR_STATS(blezl); 1135 break; 1136 case bgtzl_op: 1137 MIPS_R2BR_STATS(bgtzl); 1138 break; 1139 } 1140 1141 switch (MIPSInst_OPCODE(nir)) { 1142 case cop1_op: 1143 case cop1x_op: 1144 case lwc1_op: 1145 case swc1_op: 1146 regs->cp0_cause |= CAUSEF_BD; 1147 goto fpu_emul; 1148 } 1149 if (nir) { 1150 err = mipsr6_emul(regs, nir); 1151 if (err > 0) { 1152 err = mips_dsemul(regs, nir, epc, cpc); 1153 if (err == SIGILL) 1154 err = SIGEMT; 1155 MIPS_R2_STATS(dsemul); 1156 } 1157 } 1158 break; 1159 case lwc1_op: 1160 case swc1_op: 1161 case cop1_op: 1162 case cop1x_op: 1163 fpu_emul: 1164 regs->regs[31] = r31; 1165 regs->cp0_epc = epc; 1166 if (!used_math()) { /* First time FPU user. */ 1167 preempt_disable(); 1168 err = init_fpu(); 1169 preempt_enable(); 1170 set_used_math(); 1171 } 1172 lose_fpu(1); /* Save FPU state for the emulator. 
*/ 1173 1174 err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, 1175 &fault_addr); 1176 *fcr31 = current->thread.fpu.fcr31; 1177 1178 /* 1179 * We can't allow the emulated instruction to leave any of 1180 * the cause bits set in $fcr31. 1181 */ 1182 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 1183 1184 /* 1185 * this is a tricky issue - lose_fpu() uses LL/SC atomics 1186 * if FPU is owned and effectively cancels user level LL/SC. 1187 * So, it could be logical to don't restore FPU ownership here. 1188 * But the sequence of multiple FPU instructions is much much 1189 * more often than LL-FPU-SC and I prefer loop here until 1190 * next scheduler cycle cancels FPU ownership 1191 */ 1192 own_fpu(1); /* Restore FPU state. */ 1193 1194 if (err) 1195 current->thread.cp0_baduaddr = (unsigned long)fault_addr; 1196 1197 MIPS_R2_STATS(fpus); 1198 1199 break; 1200 1201 case lwl_op: 1202 rt = regs->regs[MIPSInst_RT(inst)]; 1203 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1204 if (!access_ok(VERIFY_READ, vaddr, 4)) { 1205 current->thread.cp0_baduaddr = vaddr; 1206 err = SIGSEGV; 1207 break; 1208 } 1209 __asm__ __volatile__( 1210 " .set push\n" 1211 " .set reorder\n" 1212 #ifdef CONFIG_CPU_LITTLE_ENDIAN 1213 "1:" LB "%1, 0(%2)\n" 1214 INS "%0, %1, 24, 8\n" 1215 " andi %1, %2, 0x3\n" 1216 " beq $0, %1, 9f\n" 1217 ADDIU "%2, %2, -1\n" 1218 "2:" LB "%1, 0(%2)\n" 1219 INS "%0, %1, 16, 8\n" 1220 " andi %1, %2, 0x3\n" 1221 " beq $0, %1, 9f\n" 1222 ADDIU "%2, %2, -1\n" 1223 "3:" LB "%1, 0(%2)\n" 1224 INS "%0, %1, 8, 8\n" 1225 " andi %1, %2, 0x3\n" 1226 " beq $0, %1, 9f\n" 1227 ADDIU "%2, %2, -1\n" 1228 "4:" LB "%1, 0(%2)\n" 1229 INS "%0, %1, 0, 8\n" 1230 #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1231 "1:" LB "%1, 0(%2)\n" 1232 INS "%0, %1, 24, 8\n" 1233 ADDIU "%2, %2, 1\n" 1234 " andi %1, %2, 0x3\n" 1235 " beq $0, %1, 9f\n" 1236 "2:" LB "%1, 0(%2)\n" 1237 INS "%0, %1, 16, 8\n" 1238 ADDIU "%2, %2, 1\n" 1239 " andi %1, %2, 0x3\n" 1240 " beq $0, %1, 9f\n" 1241 "3:" LB 
"%1, 0(%2)\n" 1242 INS "%0, %1, 8, 8\n" 1243 ADDIU "%2, %2, 1\n" 1244 " andi %1, %2, 0x3\n" 1245 " beq $0, %1, 9f\n" 1246 "4:" LB "%1, 0(%2)\n" 1247 INS "%0, %1, 0, 8\n" 1248 #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1249 "9: sll %0, %0, 0\n" 1250 "10:\n" 1251 " .insn\n" 1252 " .section .fixup,\"ax\"\n" 1253 "8: li %3,%4\n" 1254 " j 10b\n" 1255 " .previous\n" 1256 " .section __ex_table,\"a\"\n" 1257 STR(PTR) " 1b,8b\n" 1258 STR(PTR) " 2b,8b\n" 1259 STR(PTR) " 3b,8b\n" 1260 STR(PTR) " 4b,8b\n" 1261 " .previous\n" 1262 " .set pop\n" 1263 : "+&r"(rt), "=&r"(rs), 1264 "+&r"(vaddr), "+&r"(err) 1265 : "i"(SIGSEGV)); 1266 1267 if (MIPSInst_RT(inst) && !err) 1268 regs->regs[MIPSInst_RT(inst)] = rt; 1269 1270 MIPS_R2_STATS(loads); 1271 1272 break; 1273 1274 case lwr_op: 1275 rt = regs->regs[MIPSInst_RT(inst)]; 1276 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1277 if (!access_ok(VERIFY_READ, vaddr, 4)) { 1278 current->thread.cp0_baduaddr = vaddr; 1279 err = SIGSEGV; 1280 break; 1281 } 1282 __asm__ __volatile__( 1283 " .set push\n" 1284 " .set reorder\n" 1285 #ifdef CONFIG_CPU_LITTLE_ENDIAN 1286 "1:" LB "%1, 0(%2)\n" 1287 INS "%0, %1, 0, 8\n" 1288 ADDIU "%2, %2, 1\n" 1289 " andi %1, %2, 0x3\n" 1290 " beq $0, %1, 9f\n" 1291 "2:" LB "%1, 0(%2)\n" 1292 INS "%0, %1, 8, 8\n" 1293 ADDIU "%2, %2, 1\n" 1294 " andi %1, %2, 0x3\n" 1295 " beq $0, %1, 9f\n" 1296 "3:" LB "%1, 0(%2)\n" 1297 INS "%0, %1, 16, 8\n" 1298 ADDIU "%2, %2, 1\n" 1299 " andi %1, %2, 0x3\n" 1300 " beq $0, %1, 9f\n" 1301 "4:" LB "%1, 0(%2)\n" 1302 INS "%0, %1, 24, 8\n" 1303 " sll %0, %0, 0\n" 1304 #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1305 "1:" LB "%1, 0(%2)\n" 1306 INS "%0, %1, 0, 8\n" 1307 " andi %1, %2, 0x3\n" 1308 " beq $0, %1, 9f\n" 1309 ADDIU "%2, %2, -1\n" 1310 "2:" LB "%1, 0(%2)\n" 1311 INS "%0, %1, 8, 8\n" 1312 " andi %1, %2, 0x3\n" 1313 " beq $0, %1, 9f\n" 1314 ADDIU "%2, %2, -1\n" 1315 "3:" LB "%1, 0(%2)\n" 1316 INS "%0, %1, 16, 8\n" 1317 " andi %1, %2, 0x3\n" 1318 " beq $0, %1, 9f\n" 1319 
ADDIU "%2, %2, -1\n" 1320 "4:" LB "%1, 0(%2)\n" 1321 INS "%0, %1, 24, 8\n" 1322 " sll %0, %0, 0\n" 1323 #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1324 "9:\n" 1325 "10:\n" 1326 " .insn\n" 1327 " .section .fixup,\"ax\"\n" 1328 "8: li %3,%4\n" 1329 " j 10b\n" 1330 " .previous\n" 1331 " .section __ex_table,\"a\"\n" 1332 STR(PTR) " 1b,8b\n" 1333 STR(PTR) " 2b,8b\n" 1334 STR(PTR) " 3b,8b\n" 1335 STR(PTR) " 4b,8b\n" 1336 " .previous\n" 1337 " .set pop\n" 1338 : "+&r"(rt), "=&r"(rs), 1339 "+&r"(vaddr), "+&r"(err) 1340 : "i"(SIGSEGV)); 1341 if (MIPSInst_RT(inst) && !err) 1342 regs->regs[MIPSInst_RT(inst)] = rt; 1343 1344 MIPS_R2_STATS(loads); 1345 1346 break; 1347 1348 case swl_op: 1349 rt = regs->regs[MIPSInst_RT(inst)]; 1350 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1351 if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 1352 current->thread.cp0_baduaddr = vaddr; 1353 err = SIGSEGV; 1354 break; 1355 } 1356 __asm__ __volatile__( 1357 " .set push\n" 1358 " .set reorder\n" 1359 #ifdef CONFIG_CPU_LITTLE_ENDIAN 1360 EXT "%1, %0, 24, 8\n" 1361 "1:" SB "%1, 0(%2)\n" 1362 " andi %1, %2, 0x3\n" 1363 " beq $0, %1, 9f\n" 1364 ADDIU "%2, %2, -1\n" 1365 EXT "%1, %0, 16, 8\n" 1366 "2:" SB "%1, 0(%2)\n" 1367 " andi %1, %2, 0x3\n" 1368 " beq $0, %1, 9f\n" 1369 ADDIU "%2, %2, -1\n" 1370 EXT "%1, %0, 8, 8\n" 1371 "3:" SB "%1, 0(%2)\n" 1372 " andi %1, %2, 0x3\n" 1373 " beq $0, %1, 9f\n" 1374 ADDIU "%2, %2, -1\n" 1375 EXT "%1, %0, 0, 8\n" 1376 "4:" SB "%1, 0(%2)\n" 1377 #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1378 EXT "%1, %0, 24, 8\n" 1379 "1:" SB "%1, 0(%2)\n" 1380 ADDIU "%2, %2, 1\n" 1381 " andi %1, %2, 0x3\n" 1382 " beq $0, %1, 9f\n" 1383 EXT "%1, %0, 16, 8\n" 1384 "2:" SB "%1, 0(%2)\n" 1385 ADDIU "%2, %2, 1\n" 1386 " andi %1, %2, 0x3\n" 1387 " beq $0, %1, 9f\n" 1388 EXT "%1, %0, 8, 8\n" 1389 "3:" SB "%1, 0(%2)\n" 1390 ADDIU "%2, %2, 1\n" 1391 " andi %1, %2, 0x3\n" 1392 " beq $0, %1, 9f\n" 1393 EXT "%1, %0, 0, 8\n" 1394 "4:" SB "%1, 0(%2)\n" 1395 #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 
1396 "9:\n" 1397 " .insn\n" 1398 " .section .fixup,\"ax\"\n" 1399 "8: li %3,%4\n" 1400 " j 9b\n" 1401 " .previous\n" 1402 " .section __ex_table,\"a\"\n" 1403 STR(PTR) " 1b,8b\n" 1404 STR(PTR) " 2b,8b\n" 1405 STR(PTR) " 3b,8b\n" 1406 STR(PTR) " 4b,8b\n" 1407 " .previous\n" 1408 " .set pop\n" 1409 : "+&r"(rt), "=&r"(rs), 1410 "+&r"(vaddr), "+&r"(err) 1411 : "i"(SIGSEGV) 1412 : "memory"); 1413 1414 MIPS_R2_STATS(stores); 1415 1416 break; 1417 1418 case swr_op: 1419 rt = regs->regs[MIPSInst_RT(inst)]; 1420 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1421 if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 1422 current->thread.cp0_baduaddr = vaddr; 1423 err = SIGSEGV; 1424 break; 1425 } 1426 __asm__ __volatile__( 1427 " .set push\n" 1428 " .set reorder\n" 1429 #ifdef CONFIG_CPU_LITTLE_ENDIAN 1430 EXT "%1, %0, 0, 8\n" 1431 "1:" SB "%1, 0(%2)\n" 1432 ADDIU "%2, %2, 1\n" 1433 " andi %1, %2, 0x3\n" 1434 " beq $0, %1, 9f\n" 1435 EXT "%1, %0, 8, 8\n" 1436 "2:" SB "%1, 0(%2)\n" 1437 ADDIU "%2, %2, 1\n" 1438 " andi %1, %2, 0x3\n" 1439 " beq $0, %1, 9f\n" 1440 EXT "%1, %0, 16, 8\n" 1441 "3:" SB "%1, 0(%2)\n" 1442 ADDIU "%2, %2, 1\n" 1443 " andi %1, %2, 0x3\n" 1444 " beq $0, %1, 9f\n" 1445 EXT "%1, %0, 24, 8\n" 1446 "4:" SB "%1, 0(%2)\n" 1447 #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1448 EXT "%1, %0, 0, 8\n" 1449 "1:" SB "%1, 0(%2)\n" 1450 " andi %1, %2, 0x3\n" 1451 " beq $0, %1, 9f\n" 1452 ADDIU "%2, %2, -1\n" 1453 EXT "%1, %0, 8, 8\n" 1454 "2:" SB "%1, 0(%2)\n" 1455 " andi %1, %2, 0x3\n" 1456 " beq $0, %1, 9f\n" 1457 ADDIU "%2, %2, -1\n" 1458 EXT "%1, %0, 16, 8\n" 1459 "3:" SB "%1, 0(%2)\n" 1460 " andi %1, %2, 0x3\n" 1461 " beq $0, %1, 9f\n" 1462 ADDIU "%2, %2, -1\n" 1463 EXT "%1, %0, 24, 8\n" 1464 "4:" SB "%1, 0(%2)\n" 1465 #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1466 "9:\n" 1467 " .insn\n" 1468 " .section .fixup,\"ax\"\n" 1469 "8: li %3,%4\n" 1470 " j 9b\n" 1471 " .previous\n" 1472 " .section __ex_table,\"a\"\n" 1473 STR(PTR) " 1b,8b\n" 1474 STR(PTR) " 2b,8b\n" 1475 
STR(PTR) " 3b,8b\n" 1476 STR(PTR) " 4b,8b\n" 1477 " .previous\n" 1478 " .set pop\n" 1479 : "+&r"(rt), "=&r"(rs), 1480 "+&r"(vaddr), "+&r"(err) 1481 : "i"(SIGSEGV) 1482 : "memory"); 1483 1484 MIPS_R2_STATS(stores); 1485 1486 break; 1487 1488 case ldl_op: 1489 if (IS_ENABLED(CONFIG_32BIT)) { 1490 err = SIGILL; 1491 break; 1492 } 1493 1494 rt = regs->regs[MIPSInst_RT(inst)]; 1495 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1496 if (!access_ok(VERIFY_READ, vaddr, 8)) { 1497 current->thread.cp0_baduaddr = vaddr; 1498 err = SIGSEGV; 1499 break; 1500 } 1501 __asm__ __volatile__( 1502 " .set push\n" 1503 " .set reorder\n" 1504 #ifdef CONFIG_CPU_LITTLE_ENDIAN 1505 "1: lb %1, 0(%2)\n" 1506 " dinsu %0, %1, 56, 8\n" 1507 " andi %1, %2, 0x7\n" 1508 " beq $0, %1, 9f\n" 1509 " daddiu %2, %2, -1\n" 1510 "2: lb %1, 0(%2)\n" 1511 " dinsu %0, %1, 48, 8\n" 1512 " andi %1, %2, 0x7\n" 1513 " beq $0, %1, 9f\n" 1514 " daddiu %2, %2, -1\n" 1515 "3: lb %1, 0(%2)\n" 1516 " dinsu %0, %1, 40, 8\n" 1517 " andi %1, %2, 0x7\n" 1518 " beq $0, %1, 9f\n" 1519 " daddiu %2, %2, -1\n" 1520 "4: lb %1, 0(%2)\n" 1521 " dinsu %0, %1, 32, 8\n" 1522 " andi %1, %2, 0x7\n" 1523 " beq $0, %1, 9f\n" 1524 " daddiu %2, %2, -1\n" 1525 "5: lb %1, 0(%2)\n" 1526 " dins %0, %1, 24, 8\n" 1527 " andi %1, %2, 0x7\n" 1528 " beq $0, %1, 9f\n" 1529 " daddiu %2, %2, -1\n" 1530 "6: lb %1, 0(%2)\n" 1531 " dins %0, %1, 16, 8\n" 1532 " andi %1, %2, 0x7\n" 1533 " beq $0, %1, 9f\n" 1534 " daddiu %2, %2, -1\n" 1535 "7: lb %1, 0(%2)\n" 1536 " dins %0, %1, 8, 8\n" 1537 " andi %1, %2, 0x7\n" 1538 " beq $0, %1, 9f\n" 1539 " daddiu %2, %2, -1\n" 1540 "0: lb %1, 0(%2)\n" 1541 " dins %0, %1, 0, 8\n" 1542 #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1543 "1: lb %1, 0(%2)\n" 1544 " dinsu %0, %1, 56, 8\n" 1545 " daddiu %2, %2, 1\n" 1546 " andi %1, %2, 0x7\n" 1547 " beq $0, %1, 9f\n" 1548 "2: lb %1, 0(%2)\n" 1549 " dinsu %0, %1, 48, 8\n" 1550 " daddiu %2, %2, 1\n" 1551 " andi %1, %2, 0x7\n" 1552 " beq $0, %1, 9f\n" 1553 "3: lb %1, 
0(%2)\n" 1554 " dinsu %0, %1, 40, 8\n" 1555 " daddiu %2, %2, 1\n" 1556 " andi %1, %2, 0x7\n" 1557 " beq $0, %1, 9f\n" 1558 "4: lb %1, 0(%2)\n" 1559 " dinsu %0, %1, 32, 8\n" 1560 " daddiu %2, %2, 1\n" 1561 " andi %1, %2, 0x7\n" 1562 " beq $0, %1, 9f\n" 1563 "5: lb %1, 0(%2)\n" 1564 " dins %0, %1, 24, 8\n" 1565 " daddiu %2, %2, 1\n" 1566 " andi %1, %2, 0x7\n" 1567 " beq $0, %1, 9f\n" 1568 "6: lb %1, 0(%2)\n" 1569 " dins %0, %1, 16, 8\n" 1570 " daddiu %2, %2, 1\n" 1571 " andi %1, %2, 0x7\n" 1572 " beq $0, %1, 9f\n" 1573 "7: lb %1, 0(%2)\n" 1574 " dins %0, %1, 8, 8\n" 1575 " daddiu %2, %2, 1\n" 1576 " andi %1, %2, 0x7\n" 1577 " beq $0, %1, 9f\n" 1578 "0: lb %1, 0(%2)\n" 1579 " dins %0, %1, 0, 8\n" 1580 #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1581 "9:\n" 1582 " .insn\n" 1583 " .section .fixup,\"ax\"\n" 1584 "8: li %3,%4\n" 1585 " j 9b\n" 1586 " .previous\n" 1587 " .section __ex_table,\"a\"\n" 1588 STR(PTR) " 1b,8b\n" 1589 STR(PTR) " 2b,8b\n" 1590 STR(PTR) " 3b,8b\n" 1591 STR(PTR) " 4b,8b\n" 1592 STR(PTR) " 5b,8b\n" 1593 STR(PTR) " 6b,8b\n" 1594 STR(PTR) " 7b,8b\n" 1595 STR(PTR) " 0b,8b\n" 1596 " .previous\n" 1597 " .set pop\n" 1598 : "+&r"(rt), "=&r"(rs), 1599 "+&r"(vaddr), "+&r"(err) 1600 : "i"(SIGSEGV)); 1601 if (MIPSInst_RT(inst) && !err) 1602 regs->regs[MIPSInst_RT(inst)] = rt; 1603 1604 MIPS_R2_STATS(loads); 1605 break; 1606 1607 case ldr_op: 1608 if (IS_ENABLED(CONFIG_32BIT)) { 1609 err = SIGILL; 1610 break; 1611 } 1612 1613 rt = regs->regs[MIPSInst_RT(inst)]; 1614 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1615 if (!access_ok(VERIFY_READ, vaddr, 8)) { 1616 current->thread.cp0_baduaddr = vaddr; 1617 err = SIGSEGV; 1618 break; 1619 } 1620 __asm__ __volatile__( 1621 " .set push\n" 1622 " .set reorder\n" 1623 #ifdef CONFIG_CPU_LITTLE_ENDIAN 1624 "1: lb %1, 0(%2)\n" 1625 " dins %0, %1, 0, 8\n" 1626 " daddiu %2, %2, 1\n" 1627 " andi %1, %2, 0x7\n" 1628 " beq $0, %1, 9f\n" 1629 "2: lb %1, 0(%2)\n" 1630 " dins %0, %1, 8, 8\n" 1631 " daddiu %2, %2, 1\n" 
1632 " andi %1, %2, 0x7\n" 1633 " beq $0, %1, 9f\n" 1634 "3: lb %1, 0(%2)\n" 1635 " dins %0, %1, 16, 8\n" 1636 " daddiu %2, %2, 1\n" 1637 " andi %1, %2, 0x7\n" 1638 " beq $0, %1, 9f\n" 1639 "4: lb %1, 0(%2)\n" 1640 " dins %0, %1, 24, 8\n" 1641 " daddiu %2, %2, 1\n" 1642 " andi %1, %2, 0x7\n" 1643 " beq $0, %1, 9f\n" 1644 "5: lb %1, 0(%2)\n" 1645 " dinsu %0, %1, 32, 8\n" 1646 " daddiu %2, %2, 1\n" 1647 " andi %1, %2, 0x7\n" 1648 " beq $0, %1, 9f\n" 1649 "6: lb %1, 0(%2)\n" 1650 " dinsu %0, %1, 40, 8\n" 1651 " daddiu %2, %2, 1\n" 1652 " andi %1, %2, 0x7\n" 1653 " beq $0, %1, 9f\n" 1654 "7: lb %1, 0(%2)\n" 1655 " dinsu %0, %1, 48, 8\n" 1656 " daddiu %2, %2, 1\n" 1657 " andi %1, %2, 0x7\n" 1658 " beq $0, %1, 9f\n" 1659 "0: lb %1, 0(%2)\n" 1660 " dinsu %0, %1, 56, 8\n" 1661 #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1662 "1: lb %1, 0(%2)\n" 1663 " dins %0, %1, 0, 8\n" 1664 " andi %1, %2, 0x7\n" 1665 " beq $0, %1, 9f\n" 1666 " daddiu %2, %2, -1\n" 1667 "2: lb %1, 0(%2)\n" 1668 " dins %0, %1, 8, 8\n" 1669 " andi %1, %2, 0x7\n" 1670 " beq $0, %1, 9f\n" 1671 " daddiu %2, %2, -1\n" 1672 "3: lb %1, 0(%2)\n" 1673 " dins %0, %1, 16, 8\n" 1674 " andi %1, %2, 0x7\n" 1675 " beq $0, %1, 9f\n" 1676 " daddiu %2, %2, -1\n" 1677 "4: lb %1, 0(%2)\n" 1678 " dins %0, %1, 24, 8\n" 1679 " andi %1, %2, 0x7\n" 1680 " beq $0, %1, 9f\n" 1681 " daddiu %2, %2, -1\n" 1682 "5: lb %1, 0(%2)\n" 1683 " dinsu %0, %1, 32, 8\n" 1684 " andi %1, %2, 0x7\n" 1685 " beq $0, %1, 9f\n" 1686 " daddiu %2, %2, -1\n" 1687 "6: lb %1, 0(%2)\n" 1688 " dinsu %0, %1, 40, 8\n" 1689 " andi %1, %2, 0x7\n" 1690 " beq $0, %1, 9f\n" 1691 " daddiu %2, %2, -1\n" 1692 "7: lb %1, 0(%2)\n" 1693 " dinsu %0, %1, 48, 8\n" 1694 " andi %1, %2, 0x7\n" 1695 " beq $0, %1, 9f\n" 1696 " daddiu %2, %2, -1\n" 1697 "0: lb %1, 0(%2)\n" 1698 " dinsu %0, %1, 56, 8\n" 1699 #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1700 "9:\n" 1701 " .insn\n" 1702 " .section .fixup,\"ax\"\n" 1703 "8: li %3,%4\n" 1704 " j 9b\n" 1705 " .previous\n" 1706 " .section 
__ex_table,\"a\"\n" 1707 STR(PTR) " 1b,8b\n" 1708 STR(PTR) " 2b,8b\n" 1709 STR(PTR) " 3b,8b\n" 1710 STR(PTR) " 4b,8b\n" 1711 STR(PTR) " 5b,8b\n" 1712 STR(PTR) " 6b,8b\n" 1713 STR(PTR) " 7b,8b\n" 1714 STR(PTR) " 0b,8b\n" 1715 " .previous\n" 1716 " .set pop\n" 1717 : "+&r"(rt), "=&r"(rs), 1718 "+&r"(vaddr), "+&r"(err) 1719 : "i"(SIGSEGV)); 1720 if (MIPSInst_RT(inst) && !err) 1721 regs->regs[MIPSInst_RT(inst)] = rt; 1722 1723 MIPS_R2_STATS(loads); 1724 break; 1725 1726 case sdl_op: 1727 if (IS_ENABLED(CONFIG_32BIT)) { 1728 err = SIGILL; 1729 break; 1730 } 1731 1732 rt = regs->regs[MIPSInst_RT(inst)]; 1733 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1734 if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 1735 current->thread.cp0_baduaddr = vaddr; 1736 err = SIGSEGV; 1737 break; 1738 } 1739 __asm__ __volatile__( 1740 " .set push\n" 1741 " .set reorder\n" 1742 #ifdef CONFIG_CPU_LITTLE_ENDIAN 1743 " dextu %1, %0, 56, 8\n" 1744 "1: sb %1, 0(%2)\n" 1745 " andi %1, %2, 0x7\n" 1746 " beq $0, %1, 9f\n" 1747 " daddiu %2, %2, -1\n" 1748 " dextu %1, %0, 48, 8\n" 1749 "2: sb %1, 0(%2)\n" 1750 " andi %1, %2, 0x7\n" 1751 " beq $0, %1, 9f\n" 1752 " daddiu %2, %2, -1\n" 1753 " dextu %1, %0, 40, 8\n" 1754 "3: sb %1, 0(%2)\n" 1755 " andi %1, %2, 0x7\n" 1756 " beq $0, %1, 9f\n" 1757 " daddiu %2, %2, -1\n" 1758 " dextu %1, %0, 32, 8\n" 1759 "4: sb %1, 0(%2)\n" 1760 " andi %1, %2, 0x7\n" 1761 " beq $0, %1, 9f\n" 1762 " daddiu %2, %2, -1\n" 1763 " dext %1, %0, 24, 8\n" 1764 "5: sb %1, 0(%2)\n" 1765 " andi %1, %2, 0x7\n" 1766 " beq $0, %1, 9f\n" 1767 " daddiu %2, %2, -1\n" 1768 " dext %1, %0, 16, 8\n" 1769 "6: sb %1, 0(%2)\n" 1770 " andi %1, %2, 0x7\n" 1771 " beq $0, %1, 9f\n" 1772 " daddiu %2, %2, -1\n" 1773 " dext %1, %0, 8, 8\n" 1774 "7: sb %1, 0(%2)\n" 1775 " andi %1, %2, 0x7\n" 1776 " beq $0, %1, 9f\n" 1777 " daddiu %2, %2, -1\n" 1778 " dext %1, %0, 0, 8\n" 1779 "0: sb %1, 0(%2)\n" 1780 #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1781 " dextu %1, %0, 56, 8\n" 1782 "1: sb %1, 0(%2)\n" 
1783 " daddiu %2, %2, 1\n" 1784 " andi %1, %2, 0x7\n" 1785 " beq $0, %1, 9f\n" 1786 " dextu %1, %0, 48, 8\n" 1787 "2: sb %1, 0(%2)\n" 1788 " daddiu %2, %2, 1\n" 1789 " andi %1, %2, 0x7\n" 1790 " beq $0, %1, 9f\n" 1791 " dextu %1, %0, 40, 8\n" 1792 "3: sb %1, 0(%2)\n" 1793 " daddiu %2, %2, 1\n" 1794 " andi %1, %2, 0x7\n" 1795 " beq $0, %1, 9f\n" 1796 " dextu %1, %0, 32, 8\n" 1797 "4: sb %1, 0(%2)\n" 1798 " daddiu %2, %2, 1\n" 1799 " andi %1, %2, 0x7\n" 1800 " beq $0, %1, 9f\n" 1801 " dext %1, %0, 24, 8\n" 1802 "5: sb %1, 0(%2)\n" 1803 " daddiu %2, %2, 1\n" 1804 " andi %1, %2, 0x7\n" 1805 " beq $0, %1, 9f\n" 1806 " dext %1, %0, 16, 8\n" 1807 "6: sb %1, 0(%2)\n" 1808 " daddiu %2, %2, 1\n" 1809 " andi %1, %2, 0x7\n" 1810 " beq $0, %1, 9f\n" 1811 " dext %1, %0, 8, 8\n" 1812 "7: sb %1, 0(%2)\n" 1813 " daddiu %2, %2, 1\n" 1814 " andi %1, %2, 0x7\n" 1815 " beq $0, %1, 9f\n" 1816 " dext %1, %0, 0, 8\n" 1817 "0: sb %1, 0(%2)\n" 1818 #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1819 "9:\n" 1820 " .insn\n" 1821 " .section .fixup,\"ax\"\n" 1822 "8: li %3,%4\n" 1823 " j 9b\n" 1824 " .previous\n" 1825 " .section __ex_table,\"a\"\n" 1826 STR(PTR) " 1b,8b\n" 1827 STR(PTR) " 2b,8b\n" 1828 STR(PTR) " 3b,8b\n" 1829 STR(PTR) " 4b,8b\n" 1830 STR(PTR) " 5b,8b\n" 1831 STR(PTR) " 6b,8b\n" 1832 STR(PTR) " 7b,8b\n" 1833 STR(PTR) " 0b,8b\n" 1834 " .previous\n" 1835 " .set pop\n" 1836 : "+&r"(rt), "=&r"(rs), 1837 "+&r"(vaddr), "+&r"(err) 1838 : "i"(SIGSEGV) 1839 : "memory"); 1840 1841 MIPS_R2_STATS(stores); 1842 break; 1843 1844 case sdr_op: 1845 if (IS_ENABLED(CONFIG_32BIT)) { 1846 err = SIGILL; 1847 break; 1848 } 1849 1850 rt = regs->regs[MIPSInst_RT(inst)]; 1851 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1852 if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 1853 current->thread.cp0_baduaddr = vaddr; 1854 err = SIGSEGV; 1855 break; 1856 } 1857 __asm__ __volatile__( 1858 " .set push\n" 1859 " .set reorder\n" 1860 #ifdef CONFIG_CPU_LITTLE_ENDIAN 1861 " dext %1, %0, 0, 8\n" 1862 "1: sb %1, 
0(%2)\n" 1863 " daddiu %2, %2, 1\n" 1864 " andi %1, %2, 0x7\n" 1865 " beq $0, %1, 9f\n" 1866 " dext %1, %0, 8, 8\n" 1867 "2: sb %1, 0(%2)\n" 1868 " daddiu %2, %2, 1\n" 1869 " andi %1, %2, 0x7\n" 1870 " beq $0, %1, 9f\n" 1871 " dext %1, %0, 16, 8\n" 1872 "3: sb %1, 0(%2)\n" 1873 " daddiu %2, %2, 1\n" 1874 " andi %1, %2, 0x7\n" 1875 " beq $0, %1, 9f\n" 1876 " dext %1, %0, 24, 8\n" 1877 "4: sb %1, 0(%2)\n" 1878 " daddiu %2, %2, 1\n" 1879 " andi %1, %2, 0x7\n" 1880 " beq $0, %1, 9f\n" 1881 " dextu %1, %0, 32, 8\n" 1882 "5: sb %1, 0(%2)\n" 1883 " daddiu %2, %2, 1\n" 1884 " andi %1, %2, 0x7\n" 1885 " beq $0, %1, 9f\n" 1886 " dextu %1, %0, 40, 8\n" 1887 "6: sb %1, 0(%2)\n" 1888 " daddiu %2, %2, 1\n" 1889 " andi %1, %2, 0x7\n" 1890 " beq $0, %1, 9f\n" 1891 " dextu %1, %0, 48, 8\n" 1892 "7: sb %1, 0(%2)\n" 1893 " daddiu %2, %2, 1\n" 1894 " andi %1, %2, 0x7\n" 1895 " beq $0, %1, 9f\n" 1896 " dextu %1, %0, 56, 8\n" 1897 "0: sb %1, 0(%2)\n" 1898 #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1899 " dext %1, %0, 0, 8\n" 1900 "1: sb %1, 0(%2)\n" 1901 " andi %1, %2, 0x7\n" 1902 " beq $0, %1, 9f\n" 1903 " daddiu %2, %2, -1\n" 1904 " dext %1, %0, 8, 8\n" 1905 "2: sb %1, 0(%2)\n" 1906 " andi %1, %2, 0x7\n" 1907 " beq $0, %1, 9f\n" 1908 " daddiu %2, %2, -1\n" 1909 " dext %1, %0, 16, 8\n" 1910 "3: sb %1, 0(%2)\n" 1911 " andi %1, %2, 0x7\n" 1912 " beq $0, %1, 9f\n" 1913 " daddiu %2, %2, -1\n" 1914 " dext %1, %0, 24, 8\n" 1915 "4: sb %1, 0(%2)\n" 1916 " andi %1, %2, 0x7\n" 1917 " beq $0, %1, 9f\n" 1918 " daddiu %2, %2, -1\n" 1919 " dextu %1, %0, 32, 8\n" 1920 "5: sb %1, 0(%2)\n" 1921 " andi %1, %2, 0x7\n" 1922 " beq $0, %1, 9f\n" 1923 " daddiu %2, %2, -1\n" 1924 " dextu %1, %0, 40, 8\n" 1925 "6: sb %1, 0(%2)\n" 1926 " andi %1, %2, 0x7\n" 1927 " beq $0, %1, 9f\n" 1928 " daddiu %2, %2, -1\n" 1929 " dextu %1, %0, 48, 8\n" 1930 "7: sb %1, 0(%2)\n" 1931 " andi %1, %2, 0x7\n" 1932 " beq $0, %1, 9f\n" 1933 " daddiu %2, %2, -1\n" 1934 " dextu %1, %0, 56, 8\n" 1935 "0: sb %1, 0(%2)\n" 1936 #endif /* 
CONFIG_CPU_LITTLE_ENDIAN */ 1937 "9:\n" 1938 " .insn\n" 1939 " .section .fixup,\"ax\"\n" 1940 "8: li %3,%4\n" 1941 " j 9b\n" 1942 " .previous\n" 1943 " .section __ex_table,\"a\"\n" 1944 STR(PTR) " 1b,8b\n" 1945 STR(PTR) " 2b,8b\n" 1946 STR(PTR) " 3b,8b\n" 1947 STR(PTR) " 4b,8b\n" 1948 STR(PTR) " 5b,8b\n" 1949 STR(PTR) " 6b,8b\n" 1950 STR(PTR) " 7b,8b\n" 1951 STR(PTR) " 0b,8b\n" 1952 " .previous\n" 1953 " .set pop\n" 1954 : "+&r"(rt), "=&r"(rs), 1955 "+&r"(vaddr), "+&r"(err) 1956 : "i"(SIGSEGV) 1957 : "memory"); 1958 1959 MIPS_R2_STATS(stores); 1960 1961 break; 1962 case ll_op: 1963 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1964 if (vaddr & 0x3) { 1965 current->thread.cp0_baduaddr = vaddr; 1966 err = SIGBUS; 1967 break; 1968 } 1969 if (!access_ok(VERIFY_READ, vaddr, 4)) { 1970 current->thread.cp0_baduaddr = vaddr; 1971 err = SIGBUS; 1972 break; 1973 } 1974 1975 if (!cpu_has_rw_llb) { 1976 /* 1977 * An LL/SC block can't be safely emulated without 1978 * a Config5/LLB availability. So it's probably time to 1979 * kill our process before things get any worse. This is 1980 * because Config5/LLB allows us to use ERETNC so that 1981 * the LLAddr/LLB bit is not cleared when we return from 1982 * an exception. MIPS R2 LL/SC instructions trap with an 1983 * RI exception so once we emulate them here, we return 1984 * back to userland with ERETNC. That preserves the 1985 * LLAddr/LLB so the subsequent SC instruction will 1986 * succeed preserving the atomic semantics of the LL/SC 1987 * block. Without that, there is no safe way to emulate 1988 * an LL/SC block in MIPSR2 userland. 
1989 */ 1990 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 1991 err = SIGKILL; 1992 break; 1993 } 1994 1995 __asm__ __volatile__( 1996 "1:\n" 1997 "ll %0, 0(%2)\n" 1998 "2:\n" 1999 ".insn\n" 2000 ".section .fixup,\"ax\"\n" 2001 "3:\n" 2002 "li %1, %3\n" 2003 "j 2b\n" 2004 ".previous\n" 2005 ".section __ex_table,\"a\"\n" 2006 STR(PTR) " 1b,3b\n" 2007 ".previous\n" 2008 : "=&r"(res), "+&r"(err) 2009 : "r"(vaddr), "i"(SIGSEGV) 2010 : "memory"); 2011 2012 if (MIPSInst_RT(inst) && !err) 2013 regs->regs[MIPSInst_RT(inst)] = res; 2014 MIPS_R2_STATS(llsc); 2015 2016 break; 2017 2018 case sc_op: 2019 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2020 if (vaddr & 0x3) { 2021 current->thread.cp0_baduaddr = vaddr; 2022 err = SIGBUS; 2023 break; 2024 } 2025 if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 2026 current->thread.cp0_baduaddr = vaddr; 2027 err = SIGBUS; 2028 break; 2029 } 2030 2031 if (!cpu_has_rw_llb) { 2032 /* 2033 * An LL/SC block can't be safely emulated without 2034 * a Config5/LLB availability. So it's probably time to 2035 * kill our process before things get any worse. This is 2036 * because Config5/LLB allows us to use ERETNC so that 2037 * the LLAddr/LLB bit is not cleared when we return from 2038 * an exception. MIPS R2 LL/SC instructions trap with an 2039 * RI exception so once we emulate them here, we return 2040 * back to userland with ERETNC. That preserves the 2041 * LLAddr/LLB so the subsequent SC instruction will 2042 * succeed preserving the atomic semantics of the LL/SC 2043 * block. Without that, there is no safe way to emulate 2044 * an LL/SC block in MIPSR2 userland. 
2045 */ 2046 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 2047 err = SIGKILL; 2048 break; 2049 } 2050 2051 res = regs->regs[MIPSInst_RT(inst)]; 2052 2053 __asm__ __volatile__( 2054 "1:\n" 2055 "sc %0, 0(%2)\n" 2056 "2:\n" 2057 ".insn\n" 2058 ".section .fixup,\"ax\"\n" 2059 "3:\n" 2060 "li %1, %3\n" 2061 "j 2b\n" 2062 ".previous\n" 2063 ".section __ex_table,\"a\"\n" 2064 STR(PTR) " 1b,3b\n" 2065 ".previous\n" 2066 : "+&r"(res), "+&r"(err) 2067 : "r"(vaddr), "i"(SIGSEGV)); 2068 2069 if (MIPSInst_RT(inst) && !err) 2070 regs->regs[MIPSInst_RT(inst)] = res; 2071 2072 MIPS_R2_STATS(llsc); 2073 2074 break; 2075 2076 case lld_op: 2077 if (IS_ENABLED(CONFIG_32BIT)) { 2078 err = SIGILL; 2079 break; 2080 } 2081 2082 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2083 if (vaddr & 0x7) { 2084 current->thread.cp0_baduaddr = vaddr; 2085 err = SIGBUS; 2086 break; 2087 } 2088 if (!access_ok(VERIFY_READ, vaddr, 8)) { 2089 current->thread.cp0_baduaddr = vaddr; 2090 err = SIGBUS; 2091 break; 2092 } 2093 2094 if (!cpu_has_rw_llb) { 2095 /* 2096 * An LL/SC block can't be safely emulated without 2097 * a Config5/LLB availability. So it's probably time to 2098 * kill our process before things get any worse. This is 2099 * because Config5/LLB allows us to use ERETNC so that 2100 * the LLAddr/LLB bit is not cleared when we return from 2101 * an exception. MIPS R2 LL/SC instructions trap with an 2102 * RI exception so once we emulate them here, we return 2103 * back to userland with ERETNC. That preserves the 2104 * LLAddr/LLB so the subsequent SC instruction will 2105 * succeed preserving the atomic semantics of the LL/SC 2106 * block. Without that, there is no safe way to emulate 2107 * an LL/SC block in MIPSR2 userland. 
2108 */ 2109 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 2110 err = SIGKILL; 2111 break; 2112 } 2113 2114 __asm__ __volatile__( 2115 "1:\n" 2116 "lld %0, 0(%2)\n" 2117 "2:\n" 2118 ".insn\n" 2119 ".section .fixup,\"ax\"\n" 2120 "3:\n" 2121 "li %1, %3\n" 2122 "j 2b\n" 2123 ".previous\n" 2124 ".section __ex_table,\"a\"\n" 2125 STR(PTR) " 1b,3b\n" 2126 ".previous\n" 2127 : "=&r"(res), "+&r"(err) 2128 : "r"(vaddr), "i"(SIGSEGV) 2129 : "memory"); 2130 if (MIPSInst_RT(inst) && !err) 2131 regs->regs[MIPSInst_RT(inst)] = res; 2132 2133 MIPS_R2_STATS(llsc); 2134 2135 break; 2136 2137 case scd_op: 2138 if (IS_ENABLED(CONFIG_32BIT)) { 2139 err = SIGILL; 2140 break; 2141 } 2142 2143 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2144 if (vaddr & 0x7) { 2145 current->thread.cp0_baduaddr = vaddr; 2146 err = SIGBUS; 2147 break; 2148 } 2149 if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 2150 current->thread.cp0_baduaddr = vaddr; 2151 err = SIGBUS; 2152 break; 2153 } 2154 2155 if (!cpu_has_rw_llb) { 2156 /* 2157 * An LL/SC block can't be safely emulated without 2158 * a Config5/LLB availability. So it's probably time to 2159 * kill our process before things get any worse. This is 2160 * because Config5/LLB allows us to use ERETNC so that 2161 * the LLAddr/LLB bit is not cleared when we return from 2162 * an exception. MIPS R2 LL/SC instructions trap with an 2163 * RI exception so once we emulate them here, we return 2164 * back to userland with ERETNC. That preserves the 2165 * LLAddr/LLB so the subsequent SC instruction will 2166 * succeed preserving the atomic semantics of the LL/SC 2167 * block. Without that, there is no safe way to emulate 2168 * an LL/SC block in MIPSR2 userland. 
			 */
			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
			err = SIGKILL;
			break;
		}

		res = regs->regs[MIPSInst_RT(inst)];

		/*
		 * Emulate the 64-bit store-conditional. A fault on the
		 * SCD is redirected by the __ex_table entry to label 3,
		 * which loads SIGSEGV into err and resumes at label 2.
		 */
		__asm__ __volatile__(
			"1:\n"
			"scd %0, 0(%2)\n"
			"2:\n"
			".insn\n"
			".section .fixup,\"ax\"\n"
			"3:\n"
			"li %1, %3\n"
			"j 2b\n"
			".previous\n"
			".section __ex_table,\"a\"\n"
			STR(PTR) " 1b,3b\n"
			".previous\n"
			: "+&r"(res), "+&r"(err)
			: "r"(vaddr), "i"(SIGSEGV));

		/* SCD wrote its success flag into res; never clobber $0 */
		if (MIPSInst_RT(inst) && !err)
			regs->regs[MIPSInst_RT(inst)] = res;

		MIPS_R2_STATS(llsc);

		break;
	case pref_op:
		/* skip it */
		break;
	default:
		err = SIGILL;
	}

	/*
	 * Let's not return to userland just yet. It's costly and
	 * it's likely we have more R2 instructions to emulate
	 */
	if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
		regs->cp0_cause &= ~CAUSEF_BD;
		/* get_user() returns 0 or -EFAULT; a fault becomes SIGSEGV */
		err = get_user(inst, (u32 __user *)regs->cp0_epc);
		if (!err)
			goto repeat;

		if (err < 0)
			err = SIGSEGV;
	}

	/*
	 * A pending signal undoes any progress: restore ra and the
	 * original EPC so the signal is delivered at the faulting
	 * instruction. SIGEMT is exempt — presumably it marks a
	 * successfully queued delay-slot emulation (TODO confirm
	 * against the dsemul path earlier in this function).
	 */
	if (err && (err != SIGEMT)) {
		regs->regs[31] = r31;
		regs->cp0_epc = epc;
	}

	/* Likely a MIPS R6 compatible instruction */
	if (pass && (err == SIGILL))
		err = 0;

	return err;
}

#ifdef CONFIG_DEBUG_FS

/*
 * Dump the R2 emulation counters to debugfs. Note that the
 * __this_cpu_read() accessors report only the counters of whichever
 * CPU happens to service the read, not a sum across all CPUs.
 */
static int mipsr2_stats_show(struct seq_file *s, void *unused)
{

	seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
	seq_printf(s, "movs\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.movs),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
	seq_printf(s, "hilo\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
	seq_printf(s, "muls\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.muls),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
	seq_printf(s, "divs\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.divs),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
	seq_printf(s, "dsps\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
	seq_printf(s, "bops\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.bops),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
	seq_printf(s, "traps\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.traps),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
	seq_printf(s, "fpus\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
	seq_printf(s, "loads\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.loads),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
	seq_printf(s, "stores\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.stores),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
	seq_printf(s, "llsc\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
	seq_printf(s, "dsemul\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
	/* Branch emulations only have a totals column */
	seq_printf(s, "jr\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
	seq_printf(s, "bltzl\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
	seq_printf(s, "bgezl\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
	seq_printf(s, "bltzll\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
	seq_printf(s, "bgezll\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
	seq_printf(s, "bltzal\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
	seq_printf(s, "bgezal\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
	seq_printf(s, "beql\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
	seq_printf(s, "bnel\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
	seq_printf(s, "blezl\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
	seq_printf(s, "bgtzl\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));

	return 0;
}

/*
 * Same dump as mipsr2_stats_show(), then zero every counter on the
 * reading CPU — so reading this debugfs file both reports and clears.
 */
static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
{
	mipsr2_stats_show(s, unused);

	__this_cpu_write((mipsr2emustats).movs, 0);
	__this_cpu_write((mipsr2bdemustats).movs, 0);
	__this_cpu_write((mipsr2emustats).hilo, 0);
	__this_cpu_write((mipsr2bdemustats).hilo, 0);
	__this_cpu_write((mipsr2emustats).muls, 0);
	__this_cpu_write((mipsr2bdemustats).muls, 0);
	__this_cpu_write((mipsr2emustats).divs, 0);
	__this_cpu_write((mipsr2bdemustats).divs, 0);
	__this_cpu_write((mipsr2emustats).dsps, 0);
	__this_cpu_write((mipsr2bdemustats).dsps, 0);
	__this_cpu_write((mipsr2emustats).bops, 0);
	__this_cpu_write((mipsr2bdemustats).bops, 0);
	__this_cpu_write((mipsr2emustats).traps, 0);
	__this_cpu_write((mipsr2bdemustats).traps, 0);
	__this_cpu_write((mipsr2emustats).fpus, 0);
	__this_cpu_write((mipsr2bdemustats).fpus, 0);
	__this_cpu_write((mipsr2emustats).loads, 0);
	__this_cpu_write((mipsr2bdemustats).loads, 0);
	__this_cpu_write((mipsr2emustats).stores, 0);
	__this_cpu_write((mipsr2bdemustats).stores, 0);
	__this_cpu_write((mipsr2emustats).llsc, 0);
	__this_cpu_write((mipsr2bdemustats).llsc, 0);
	__this_cpu_write((mipsr2emustats).dsemul, 0);
	__this_cpu_write((mipsr2bdemustats).dsemul, 0);
	__this_cpu_write((mipsr2bremustats).jrs, 0);
	__this_cpu_write((mipsr2bremustats).bltzl, 0);
	__this_cpu_write((mipsr2bremustats).bgezl, 0);
	__this_cpu_write((mipsr2bremustats).bltzll, 0);
	__this_cpu_write((mipsr2bremustats).bgezll, 0);
	__this_cpu_write((mipsr2bremustats).bltzal, 0);
	__this_cpu_write((mipsr2bremustats).bgezal, 0);
	__this_cpu_write((mipsr2bremustats).beql, 0);
	__this_cpu_write((mipsr2bremustats).bnel, 0);
	__this_cpu_write((mipsr2bremustats).blezl, 0);
	__this_cpu_write((mipsr2bremustats).bgtzl, 0);

	return 0;
}

/* seq_file boilerplate: "r2_emul_stats" reads go to mipsr2_stats_show() */
static int mipsr2_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, mipsr2_stats_show, inode->i_private);
}

/* seq_file boilerplate for the dump-and-clear variant */
static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
{
	return single_open(file, mipsr2_stats_clear_show, inode->i_private);
}

static const struct file_operations mipsr2_emul_fops = {
	.open                   = mipsr2_stats_open,
	.read			= seq_read,
	.llseek			= seq_lseek,
	.release		= single_release,
};

static const struct file_operations mipsr2_clear_fops = {
	.open                   = mipsr2_stats_clear_open,
	.read			= seq_read,
	.llseek			= seq_lseek,
	.release		= single_release,
};


/*
 * Register the two debugfs files under the MIPS debugfs directory.
 *
 * NOTE(review): "r2_emul_stats_clear" is world-readable (S_IRUGO) and
 * clearing happens as a side effect of reading, so any user can reset
 * the counters — confirm that is intended.
 */
static int __init mipsr2_init_debugfs(void)
{
	struct dentry *mipsr2_emul;

	if (!mips_debugfs_dir)
		return -ENODEV;

	mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
					  mips_debugfs_dir, NULL,
					  &mipsr2_emul_fops);
	if (!mipsr2_emul)
		return -ENOMEM;

	mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
					  mips_debugfs_dir, NULL,
					  &mipsr2_clear_fops);
	if (!mipsr2_emul)
		return -ENOMEM;

	return 0;
}

device_initcall(mipsr2_init_debugfs);

#endif /* CONFIG_DEBUG_FS */