1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Single-step support. 4 * 5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM 6 */ 7 #include <linux/kernel.h> 8 #include <linux/kprobes.h> 9 #include <linux/ptrace.h> 10 #include <linux/prefetch.h> 11 #include <asm/sstep.h> 12 #include <asm/processor.h> 13 #include <linux/uaccess.h> 14 #include <asm/cpu_has_feature.h> 15 #include <asm/cputable.h> 16 #include <asm/disassemble.h> 17 18 extern char system_call_common[]; 19 extern char system_call_vectored_emulate[]; 20 21 #ifdef CONFIG_PPC64 22 /* Bits in SRR1 that are copied from MSR */ 23 #define MSR_MASK 0xffffffff87c0ffffUL 24 #else 25 #define MSR_MASK 0x87c0ffff 26 #endif 27 28 /* Bits in XER */ 29 #define XER_SO 0x80000000U 30 #define XER_OV 0x40000000U 31 #define XER_CA 0x20000000U 32 #define XER_OV32 0x00080000U 33 #define XER_CA32 0x00040000U 34 35 #ifdef CONFIG_VSX 36 #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe)) 37 #endif 38 39 #ifdef CONFIG_PPC_FPU 40 /* 41 * Functions in ldstfp.S 42 */ 43 extern void get_fpr(int rn, double *p); 44 extern void put_fpr(int rn, const double *p); 45 extern void get_vr(int rn, __vector128 *p); 46 extern void put_vr(int rn, __vector128 *p); 47 extern void load_vsrn(int vsr, const void *p); 48 extern void store_vsrn(int vsr, void *p); 49 extern void conv_sp_to_dp(const float *sp, double *dp); 50 extern void conv_dp_to_sp(const double *dp, float *sp); 51 #endif 52 53 #ifdef __powerpc64__ 54 /* 55 * Functions in quad.S 56 */ 57 extern int do_lq(unsigned long ea, unsigned long *regs); 58 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1); 59 extern int do_lqarx(unsigned long ea, unsigned long *regs); 60 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1, 61 unsigned int *crp); 62 #endif 63 64 #ifdef __LITTLE_ENDIAN__ 65 #define IS_LE 1 66 #define IS_BE 0 67 #else 68 #define IS_LE 0 69 #define IS_BE 1 70 #endif 71 72 /* 73 * Emulate the truncation of 64 bit values in 32-bit mode. 74 */ 75 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr, 76 unsigned long val) 77 { 78 if ((msr & MSR_64BIT) == 0) 79 val &= 0xffffffffUL; 80 return val; 81 } 82 83 /* 84 * Determine whether a conditional branch instruction would branch. 85 */ 86 static nokprobe_inline int branch_taken(unsigned int instr, 87 const struct pt_regs *regs, 88 struct instruction_op *op) 89 { 90 unsigned int bo = (instr >> 21) & 0x1f; 91 unsigned int bi; 92 93 if ((bo & 4) == 0) { 94 /* decrement counter */ 95 op->type |= DECCTR; 96 if (((bo >> 1) & 1) ^ (regs->ctr == 1)) 97 return 0; 98 } 99 if ((bo & 0x10) == 0) { 100 /* check bit from CR */ 101 bi = (instr >> 16) & 0x1f; 102 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1)) 103 return 0; 104 } 105 return 1; 106 } 107 108 static nokprobe_inline long address_ok(struct pt_regs *regs, 109 unsigned long ea, int nb) 110 { 111 if (!user_mode(regs)) 112 return 1; 113 if (__access_ok(ea, nb)) 114 return 1; 115 if (__access_ok(ea, 1)) 116 /* Access overlaps the end of the user region */ 117 regs->dar = TASK_SIZE_MAX - 1; 118 else 119 regs->dar = ea; 120 return 0; 121 } 122 123 /* 124 * Calculate effective address for a D-form instruction 125 */ 126 static nokprobe_inline unsigned long dform_ea(unsigned int instr, 127 const struct pt_regs *regs) 128 { 129 int ra; 130 unsigned long ea; 131 132 ra = (instr >> 16) & 0x1f; 133 ea = (signed short) instr; /* sign-extend */ 134 if (ra) 135 ea += regs->gpr[ra]; 136 137 return ea; 138 } 139 140 #ifdef __powerpc64__ 141 /* 142 * Calculate effective address for a DS-form instruction 143 */ 144 static nokprobe_inline unsigned long dsform_ea(unsigned int instr, 145 const struct pt_regs *regs) 146 { 147 int ra; 148 unsigned long ea; 149 150 ra = (instr >> 16) & 0x1f; 151 ea = (signed short) (instr & ~3); /* sign-extend */ 152 if (ra) 153 ea += regs->gpr[ra]; 154 155 return ea; 156 } 157 158 /* 159 * Calculate effective address for a DQ-form instruction 160 */ 161 static nokprobe_inline unsigned long dqform_ea(unsigned int instr, 162 const struct pt_regs *regs) 163 { 164 int ra; 165 unsigned long ea; 166 167 ra = (instr >> 16) & 0x1f; 168 ea = (signed short) (instr & ~0xf); /* sign-extend */ 169 if (ra) 170 ea += regs->gpr[ra]; 171 172 return ea; 173 } 174 #endif /* __powerpc64 */ 175 176 /* 177 * Calculate effective address for an X-form instruction 178 */ 179 static nokprobe_inline unsigned long xform_ea(unsigned int instr, 180 const struct pt_regs *regs) 181 { 182 int ra, rb; 183 unsigned long ea; 184 185 ra = (instr >> 16) & 0x1f; 186 rb = (instr >> 11) & 0x1f; 187 ea = regs->gpr[rb]; 188 if (ra) 189 ea += regs->gpr[ra]; 190 191 return ea; 192 } 193 194 /* 195 * Calculate effective address for a MLS:D-form / 8LS:D-form 196 * prefixed instruction 197 */ 198 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, 199 unsigned int suffix, 200 const struct pt_regs *regs) 201 { 202 int ra, prefix_r; 203 unsigned int dd; 204 unsigned long ea, d0, d1, d; 205 206 prefix_r = GET_PREFIX_R(instr); 207 ra = GET_PREFIX_RA(suffix); 208 209 d0 = instr & 0x3ffff; 210 d1 = suffix & 0xffff; 211 d = (d0 << 16) | d1; 212 213 /* 214 * sign extend a 34 bit number 215 */ 216 dd = (unsigned int)(d >> 2); 217 ea = (signed int)dd; 218 ea = (ea << 2) | (d & 0x3); 219 220 if (!prefix_r && ra) 221 ea += regs->gpr[ra]; 222 else if (!prefix_r && !ra) 223 ; /* Leave ea as is */ 224 else if (prefix_r) 225 ea += regs->nip; 226 227 /* 228 * (prefix_r && ra) is an invalid form. Should already be 229 * checked for by caller! 230 */ 231 232 return ea; 233 } 234 235 /* 236 * Return the largest power of 2, not greater than sizeof(unsigned long), 237 * such that x is a multiple of it. 238 */ 239 static nokprobe_inline unsigned long max_align(unsigned long x) 240 { 241 x |= sizeof(unsigned long); 242 return x & -x; /* isolates rightmost bit */ 243 } 244 245 static nokprobe_inline unsigned long byterev_2(unsigned long x) 246 { 247 return ((x >> 8) & 0xff) | ((x & 0xff) << 8); 248 } 249 250 static nokprobe_inline unsigned long byterev_4(unsigned long x) 251 { 252 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) | 253 ((x & 0xff00) << 8) | ((x & 0xff) << 24); 254 } 255 256 #ifdef __powerpc64__ 257 static nokprobe_inline unsigned long byterev_8(unsigned long x) 258 { 259 return (byterev_4(x) << 32) | byterev_4(x >> 32); 260 } 261 #endif 262 263 static nokprobe_inline void do_byte_reverse(void *ptr, int nb) 264 { 265 switch (nb) { 266 case 2: 267 *(u16 *)ptr = byterev_2(*(u16 *)ptr); 268 break; 269 case 4: 270 *(u32 *)ptr = byterev_4(*(u32 *)ptr); 271 break; 272 #ifdef __powerpc64__ 273 case 8: 274 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr); 275 break; 276 case 16: { 277 unsigned long *up = (unsigned long *)ptr; 278 unsigned long tmp; 279 tmp = byterev_8(up[0]); 280 up[0] = byterev_8(up[1]); 281 up[1] = tmp; 282 break; 283 } 284 case 32: { 285 unsigned long *up = (unsigned long *)ptr; 286 unsigned long tmp; 287 288 tmp = byterev_8(up[0]); 289 up[0] = byterev_8(up[3]); 290 up[3] = tmp; 291 tmp = byterev_8(up[2]); 292 up[2] = byterev_8(up[1]); 293 up[1] = tmp; 294 break; 295 } 296 297 #endif 298 default: 299 WARN_ON_ONCE(1); 300 } 301 } 302 303 static __always_inline int 304 __read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) 305 { 306 unsigned long x = 0; 307 308 switch (nb) { 309 case 1: 310 unsafe_get_user(x, (unsigned char __user *)ea, Efault); 311 break; 312 case 2: 313 unsafe_get_user(x, (unsigned short __user *)ea, Efault); 314 break; 315 case 4: 316 unsafe_get_user(x, (unsigned int __user *)ea, Efault); 317 break; 318 #ifdef __powerpc64__ 319 case 8: 320 unsafe_get_user(x, (unsigned long __user *)ea, Efault); 321 break; 322 #endif 323 } 324 *dest = x; 325 return 0; 326 327 Efault: 328 regs->dar = ea; 329 return -EFAULT; 330 } 331 332 static nokprobe_inline int 333 read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) 334 { 335 int err; 336 337 if (is_kernel_addr(ea)) 338 return __read_mem_aligned(dest, ea, nb, regs); 339 340 if (user_read_access_begin((void __user *)ea, nb)) { 341 err = __read_mem_aligned(dest, ea, nb, regs); 342 user_read_access_end(); 343 } else { 344 err = -EFAULT; 345 regs->dar = ea; 346 } 347 348 return err; 349 } 350 351 /* 352 * Copy from userspace to a buffer, using the largest possible 353 * aligned accesses, up to sizeof(long). 354 */ 355 static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 356 { 357 int c; 358 359 for (; nb > 0; nb -= c) { 360 c = max_align(ea); 361 if (c > nb) 362 c = max_align(nb); 363 switch (c) { 364 case 1: 365 unsafe_get_user(*dest, (u8 __user *)ea, Efault); 366 break; 367 case 2: 368 unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault); 369 break; 370 case 4: 371 unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault); 372 break; 373 #ifdef __powerpc64__ 374 case 8: 375 unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault); 376 break; 377 #endif 378 } 379 dest += c; 380 ea += c; 381 } 382 return 0; 383 384 Efault: 385 regs->dar = ea; 386 return -EFAULT; 387 } 388 389 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 390 { 391 int err; 392 393 if (is_kernel_addr(ea)) 394 return __copy_mem_in(dest, ea, nb, regs); 395 396 if (user_read_access_begin((void __user *)ea, nb)) { 397 err = __copy_mem_in(dest, ea, nb, regs); 398 user_read_access_end(); 399 } else { 400 err = -EFAULT; 401 regs->dar = ea; 402 } 403 404 return err; 405 } 406 407 static nokprobe_inline int read_mem_unaligned(unsigned long *dest, 408 unsigned long ea, int nb, 409 struct pt_regs *regs) 410 { 411 union { 412 unsigned long ul; 413 u8 b[sizeof(unsigned long)]; 414 } u; 415 int i; 416 int err; 417 418 u.ul = 0; 419 i = IS_BE ? sizeof(unsigned long) - nb : 0; 420 err = copy_mem_in(&u.b[i], ea, nb, regs); 421 if (!err) 422 *dest = u.ul; 423 return err; 424 } 425 426 /* 427 * Read memory at address ea for nb bytes, return 0 for success 428 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. 429 * If nb < sizeof(long), the result is right-justified on BE systems. 430 */ 431 static int read_mem(unsigned long *dest, unsigned long ea, int nb, 432 struct pt_regs *regs) 433 { 434 if (!address_ok(regs, ea, nb)) 435 return -EFAULT; 436 if ((ea & (nb - 1)) == 0) 437 return read_mem_aligned(dest, ea, nb, regs); 438 return read_mem_unaligned(dest, ea, nb, regs); 439 } 440 NOKPROBE_SYMBOL(read_mem); 441 442 static __always_inline int 443 __write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) 444 { 445 switch (nb) { 446 case 1: 447 unsafe_put_user(val, (unsigned char __user *)ea, Efault); 448 break; 449 case 2: 450 unsafe_put_user(val, (unsigned short __user *)ea, Efault); 451 break; 452 case 4: 453 unsafe_put_user(val, (unsigned int __user *)ea, Efault); 454 break; 455 #ifdef __powerpc64__ 456 case 8: 457 unsafe_put_user(val, (unsigned long __user *)ea, Efault); 458 break; 459 #endif 460 } 461 return 0; 462 463 Efault: 464 regs->dar = ea; 465 return -EFAULT; 466 } 467 468 static nokprobe_inline int 469 write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) 470 { 471 int err; 472 473 if (is_kernel_addr(ea)) 474 return __write_mem_aligned(val, ea, nb, regs); 475 476 if (user_write_access_begin((void __user *)ea, nb)) { 477 err = __write_mem_aligned(val, ea, nb, regs); 478 user_write_access_end(); 479 } else { 480 err = -EFAULT; 481 regs->dar = ea; 482 } 483 484 return err; 485 } 486 487 /* 488 * Copy from a buffer to userspace, using the largest possible 489 * aligned accesses, up to sizeof(long). 490 */ 491 static nokprobe_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 492 { 493 int c; 494 495 for (; nb > 0; nb -= c) { 496 c = max_align(ea); 497 if (c > nb) 498 c = max_align(nb); 499 switch (c) { 500 case 1: 501 unsafe_put_user(*dest, (u8 __user *)ea, Efault); 502 break; 503 case 2: 504 unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault); 505 break; 506 case 4: 507 unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault); 508 break; 509 #ifdef __powerpc64__ 510 case 8: 511 unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault); 512 break; 513 #endif 514 } 515 dest += c; 516 ea += c; 517 } 518 return 0; 519 520 Efault: 521 regs->dar = ea; 522 return -EFAULT; 523 } 524 525 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 526 { 527 int err; 528 529 if (is_kernel_addr(ea)) 530 return __copy_mem_out(dest, ea, nb, regs); 531 532 if (user_write_access_begin((void __user *)ea, nb)) { 533 err = __copy_mem_out(dest, ea, nb, regs); 534 user_write_access_end(); 535 } else { 536 err = -EFAULT; 537 regs->dar = ea; 538 } 539 540 return err; 541 } 542 543 static nokprobe_inline int write_mem_unaligned(unsigned long val, 544 unsigned long ea, int nb, 545 struct pt_regs *regs) 546 { 547 union { 548 unsigned long ul; 549 u8 b[sizeof(unsigned long)]; 550 } u; 551 int i; 552 553 u.ul = val; 554 i = IS_BE ? sizeof(unsigned long) - nb : 0; 555 return copy_mem_out(&u.b[i], ea, nb, regs); 556 } 557 558 /* 559 * Write memory at address ea for nb bytes, return 0 for success 560 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. 561 */ 562 static int write_mem(unsigned long val, unsigned long ea, int nb, 563 struct pt_regs *regs) 564 { 565 if (!address_ok(regs, ea, nb)) 566 return -EFAULT; 567 if ((ea & (nb - 1)) == 0) 568 return write_mem_aligned(val, ea, nb, regs); 569 return write_mem_unaligned(val, ea, nb, regs); 570 } 571 NOKPROBE_SYMBOL(write_mem); 572 573 #ifdef CONFIG_PPC_FPU 574 /* 575 * These access either the real FP register or the image in the 576 * thread_struct, depending on regs->msr & MSR_FP. 577 */ 578 static int do_fp_load(struct instruction_op *op, unsigned long ea, 579 struct pt_regs *regs, bool cross_endian) 580 { 581 int err, rn, nb; 582 union { 583 int i; 584 unsigned int u; 585 float f; 586 double d[2]; 587 unsigned long l[2]; 588 u8 b[2 * sizeof(double)]; 589 } u; 590 591 nb = GETSIZE(op->type); 592 if (!address_ok(regs, ea, nb)) 593 return -EFAULT; 594 rn = op->reg; 595 err = copy_mem_in(u.b, ea, nb, regs); 596 if (err) 597 return err; 598 if (unlikely(cross_endian)) { 599 do_byte_reverse(u.b, min(nb, 8)); 600 if (nb == 16) 601 do_byte_reverse(&u.b[8], 8); 602 } 603 preempt_disable(); 604 if (nb == 4) { 605 if (op->type & FPCONV) 606 conv_sp_to_dp(&u.f, &u.d[0]); 607 else if (op->type & SIGNEXT) 608 u.l[0] = u.i; 609 else 610 u.l[0] = u.u; 611 } 612 if (regs->msr & MSR_FP) 613 put_fpr(rn, &u.d[0]); 614 else 615 current->thread.TS_FPR(rn) = u.l[0]; 616 if (nb == 16) { 617 /* lfdp */ 618 rn |= 1; 619 if (regs->msr & MSR_FP) 620 put_fpr(rn, &u.d[1]); 621 else 622 current->thread.TS_FPR(rn) = u.l[1]; 623 } 624 preempt_enable(); 625 return 0; 626 } 627 NOKPROBE_SYMBOL(do_fp_load); 628 629 static int do_fp_store(struct instruction_op *op, unsigned long ea, 630 struct pt_regs *regs, bool cross_endian) 631 { 632 int rn, nb; 633 union { 634 unsigned int u; 635 float f; 636 double d[2]; 637 unsigned long l[2]; 638 u8 b[2 * sizeof(double)]; 639 } u; 640 641 nb = GETSIZE(op->type); 642 if (!address_ok(regs, ea, nb)) 643 return -EFAULT; 644 rn = op->reg; 645 preempt_disable(); 646 if (regs->msr & MSR_FP) 647 get_fpr(rn, &u.d[0]); 648 else 649 u.l[0] = current->thread.TS_FPR(rn); 650 if (nb == 4) { 651 if (op->type & FPCONV) 652 conv_dp_to_sp(&u.d[0], &u.f); 653 else 654 u.u = u.l[0]; 655 } 656 if (nb == 16) { 657 rn |= 1; 658 if (regs->msr & MSR_FP) 659 get_fpr(rn, &u.d[1]); 660 else 661 u.l[1] = current->thread.TS_FPR(rn); 662 } 663 preempt_enable(); 664 if (unlikely(cross_endian)) { 665 do_byte_reverse(u.b, min(nb, 8)); 666 if (nb == 16) 667 do_byte_reverse(&u.b[8], 8); 668 } 669 return copy_mem_out(u.b, ea, nb, regs); 670 } 671 NOKPROBE_SYMBOL(do_fp_store); 672 #endif 673 674 #ifdef CONFIG_ALTIVEC 675 /* For Altivec/VMX, no need to worry about alignment */ 676 static nokprobe_inline int do_vec_load(int rn, unsigned long ea, 677 int size, struct pt_regs *regs, 678 bool cross_endian) 679 { 680 int err; 681 union { 682 __vector128 v; 683 u8 b[sizeof(__vector128)]; 684 } u = {}; 685 686 if (!address_ok(regs, ea & ~0xfUL, 16)) 687 return -EFAULT; 688 /* align to multiple of size */ 689 ea &= ~(size - 1); 690 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs); 691 if (err) 692 return err; 693 if (unlikely(cross_endian)) 694 do_byte_reverse(&u.b[ea & 0xf], size); 695 preempt_disable(); 696 if (regs->msr & MSR_VEC) 697 put_vr(rn, &u.v); 698 else 699 current->thread.vr_state.vr[rn] = u.v; 700 preempt_enable(); 701 return 0; 702 } 703 704 static nokprobe_inline int do_vec_store(int rn, unsigned long ea, 705 int size, struct pt_regs *regs, 706 bool cross_endian) 707 { 708 union { 709 __vector128 v; 710 u8 b[sizeof(__vector128)]; 711 } u; 712 713 if (!address_ok(regs, ea & ~0xfUL, 16)) 714 return -EFAULT; 715 /* align to multiple of size */ 716 ea &= ~(size - 1); 717 718 preempt_disable(); 719 if (regs->msr & MSR_VEC) 720 get_vr(rn, &u.v); 721 else 722 u.v = current->thread.vr_state.vr[rn]; 723 preempt_enable(); 724 if (unlikely(cross_endian)) 725 do_byte_reverse(&u.b[ea & 0xf], size); 726 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs); 727 } 728 #endif /* CONFIG_ALTIVEC */ 729 730 #ifdef __powerpc64__ 731 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea, 732 int reg, bool cross_endian) 733 { 734 int err; 735 736 if (!address_ok(regs, ea, 16)) 737 return -EFAULT; 738 /* if aligned, should be atomic */ 739 if ((ea & 0xf) == 0) { 740 err = do_lq(ea, ®s->gpr[reg]); 741 } else { 742 err = read_mem(®s->gpr[reg + IS_LE], ea, 8, regs); 743 if (!err) 744 err = read_mem(®s->gpr[reg + IS_BE], ea + 8, 8, regs); 745 } 746 if (!err && unlikely(cross_endian)) 747 do_byte_reverse(®s->gpr[reg], 16); 748 return err; 749 } 750 751 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea, 752 int reg, bool cross_endian) 753 { 754 int err; 755 unsigned long vals[2]; 756 757 if (!address_ok(regs, ea, 16)) 758 return -EFAULT; 759 vals[0] = regs->gpr[reg]; 760 vals[1] = regs->gpr[reg + 1]; 761 if (unlikely(cross_endian)) 762 do_byte_reverse(vals, 16); 763 764 /* if aligned, should be atomic */ 765 if ((ea & 0xf) == 0) 766 return do_stq(ea, vals[0], vals[1]); 767 768 err = write_mem(vals[IS_LE], ea, 8, regs); 769 if (!err) 770 err = write_mem(vals[IS_BE], ea + 8, 8, regs); 771 return err; 772 } 773 #endif /* __powerpc64 */ 774 775 #ifdef CONFIG_VSX 776 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, 777 const void *mem, bool rev) 778 { 779 int size, read_size; 780 int i, j; 781 const unsigned int *wp; 782 const unsigned short *hp; 783 const unsigned char *bp; 784 785 size = GETSIZE(op->type); 786 reg->d[0] = reg->d[1] = 0; 787 788 switch (op->element_size) { 789 case 32: 790 /* [p]lxvp[x] */ 791 case 16: 792 /* whole vector; lxv[x] or lxvl[l] */ 793 if (size == 0) 794 break; 795 memcpy(reg, mem, size); 796 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) 797 rev = !rev; 798 if (rev) 799 do_byte_reverse(reg, size); 800 break; 801 case 8: 802 /* scalar loads, lxvd2x, lxvdsx */ 803 read_size = (size >= 8) ? 8 : size; 804 i = IS_LE ? 8 : 8 - read_size; 805 memcpy(®->b[i], mem, read_size); 806 if (rev) 807 do_byte_reverse(®->b[i], 8); 808 if (size < 8) { 809 if (op->type & SIGNEXT) { 810 /* size == 4 is the only case here */ 811 reg->d[IS_LE] = (signed int) reg->d[IS_LE]; 812 } else if (op->vsx_flags & VSX_FPCONV) { 813 preempt_disable(); 814 conv_sp_to_dp(®->fp[1 + IS_LE], 815 ®->dp[IS_LE]); 816 preempt_enable(); 817 } 818 } else { 819 if (size == 16) { 820 unsigned long v = *(unsigned long *)(mem + 8); 821 reg->d[IS_BE] = !rev ? v : byterev_8(v); 822 } else if (op->vsx_flags & VSX_SPLAT) 823 reg->d[IS_BE] = reg->d[IS_LE]; 824 } 825 break; 826 case 4: 827 /* lxvw4x, lxvwsx */ 828 wp = mem; 829 for (j = 0; j < size / 4; ++j) { 830 i = IS_LE ? 3 - j : j; 831 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++); 832 } 833 if (op->vsx_flags & VSX_SPLAT) { 834 u32 val = reg->w[IS_LE ? 3 : 0]; 835 for (; j < 4; ++j) { 836 i = IS_LE ? 3 - j : j; 837 reg->w[i] = val; 838 } 839 } 840 break; 841 case 2: 842 /* lxvh8x */ 843 hp = mem; 844 for (j = 0; j < size / 2; ++j) { 845 i = IS_LE ? 7 - j : j; 846 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++); 847 } 848 break; 849 case 1: 850 /* lxvb16x */ 851 bp = mem; 852 for (j = 0; j < size; ++j) { 853 i = IS_LE ? 15 - j : j; 854 reg->b[i] = *bp++; 855 } 856 break; 857 } 858 } 859 EXPORT_SYMBOL_GPL(emulate_vsx_load); 860 NOKPROBE_SYMBOL(emulate_vsx_load); 861 862 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, 863 void *mem, bool rev) 864 { 865 int size, write_size; 866 int i, j; 867 union vsx_reg buf; 868 unsigned int *wp; 869 unsigned short *hp; 870 unsigned char *bp; 871 872 size = GETSIZE(op->type); 873 874 switch (op->element_size) { 875 case 32: 876 /* [p]stxvp[x] */ 877 if (size == 0) 878 break; 879 if (rev) { 880 /* reverse 32 bytes */ 881 union vsx_reg buf32[2]; 882 buf32[0].d[0] = byterev_8(reg[1].d[1]); 883 buf32[0].d[1] = byterev_8(reg[1].d[0]); 884 buf32[1].d[0] = byterev_8(reg[0].d[1]); 885 buf32[1].d[1] = byterev_8(reg[0].d[0]); 886 memcpy(mem, buf32, size); 887 } else { 888 memcpy(mem, reg, size); 889 } 890 break; 891 case 16: 892 /* stxv, stxvx, stxvl, stxvll */ 893 if (size == 0) 894 break; 895 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) 896 rev = !rev; 897 if (rev) { 898 /* reverse 16 bytes */ 899 buf.d[0] = byterev_8(reg->d[1]); 900 buf.d[1] = byterev_8(reg->d[0]); 901 reg = &buf; 902 } 903 memcpy(mem, reg, size); 904 break; 905 case 8: 906 /* scalar stores, stxvd2x */ 907 write_size = (size >= 8) ? 8 : size; 908 i = IS_LE ? 8 : 8 - write_size; 909 if (size < 8 && op->vsx_flags & VSX_FPCONV) { 910 buf.d[0] = buf.d[1] = 0; 911 preempt_disable(); 912 conv_dp_to_sp(®->dp[IS_LE], &buf.fp[1 + IS_LE]); 913 preempt_enable(); 914 reg = &buf; 915 } 916 memcpy(mem, ®->b[i], write_size); 917 if (size == 16) 918 memcpy(mem + 8, ®->d[IS_BE], 8); 919 if (unlikely(rev)) { 920 do_byte_reverse(mem, write_size); 921 if (size == 16) 922 do_byte_reverse(mem + 8, 8); 923 } 924 break; 925 case 4: 926 /* stxvw4x */ 927 wp = mem; 928 for (j = 0; j < size / 4; ++j) { 929 i = IS_LE ? 3 - j : j; 930 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]); 931 } 932 break; 933 case 2: 934 /* stxvh8x */ 935 hp = mem; 936 for (j = 0; j < size / 2; ++j) { 937 i = IS_LE ? 7 - j : j; 938 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]); 939 } 940 break; 941 case 1: 942 /* stvxb16x */ 943 bp = mem; 944 for (j = 0; j < size; ++j) { 945 i = IS_LE ? 15 - j : j; 946 *bp++ = reg->b[i]; 947 } 948 break; 949 } 950 } 951 EXPORT_SYMBOL_GPL(emulate_vsx_store); 952 NOKPROBE_SYMBOL(emulate_vsx_store); 953 954 static nokprobe_inline int do_vsx_load(struct instruction_op *op, 955 unsigned long ea, struct pt_regs *regs, 956 bool cross_endian) 957 { 958 int reg = op->reg; 959 int i, j, nr_vsx_regs; 960 u8 mem[32]; 961 union vsx_reg buf[2]; 962 int size = GETSIZE(op->type); 963 964 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs)) 965 return -EFAULT; 966 967 nr_vsx_regs = max(1ul, size / sizeof(__vector128)); 968 emulate_vsx_load(op, buf, mem, cross_endian); 969 preempt_disable(); 970 if (reg < 32) { 971 /* FP regs + extensions */ 972 if (regs->msr & MSR_FP) { 973 for (i = 0; i < nr_vsx_regs; i++) { 974 j = IS_LE ? nr_vsx_regs - i - 1 : i; 975 load_vsrn(reg + i, &buf[j].v); 976 } 977 } else { 978 for (i = 0; i < nr_vsx_regs; i++) { 979 j = IS_LE ? nr_vsx_regs - i - 1 : i; 980 current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0]; 981 current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1]; 982 } 983 } 984 } else { 985 if (regs->msr & MSR_VEC) { 986 for (i = 0; i < nr_vsx_regs; i++) { 987 j = IS_LE ? nr_vsx_regs - i - 1 : i; 988 load_vsrn(reg + i, &buf[j].v); 989 } 990 } else { 991 for (i = 0; i < nr_vsx_regs; i++) { 992 j = IS_LE ? nr_vsx_regs - i - 1 : i; 993 current->thread.vr_state.vr[reg - 32 + i] = buf[j].v; 994 } 995 } 996 } 997 preempt_enable(); 998 return 0; 999 } 1000 1001 static nokprobe_inline int do_vsx_store(struct instruction_op *op, 1002 unsigned long ea, struct pt_regs *regs, 1003 bool cross_endian) 1004 { 1005 int reg = op->reg; 1006 int i, j, nr_vsx_regs; 1007 u8 mem[32]; 1008 union vsx_reg buf[2]; 1009 int size = GETSIZE(op->type); 1010 1011 if (!address_ok(regs, ea, size)) 1012 return -EFAULT; 1013 1014 nr_vsx_regs = max(1ul, size / sizeof(__vector128)); 1015 preempt_disable(); 1016 if (reg < 32) { 1017 /* FP regs + extensions */ 1018 if (regs->msr & MSR_FP) { 1019 for (i = 0; i < nr_vsx_regs; i++) { 1020 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1021 store_vsrn(reg + i, &buf[j].v); 1022 } 1023 } else { 1024 for (i = 0; i < nr_vsx_regs; i++) { 1025 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1026 buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0]; 1027 buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1]; 1028 } 1029 } 1030 } else { 1031 if (regs->msr & MSR_VEC) { 1032 for (i = 0; i < nr_vsx_regs; i++) { 1033 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1034 store_vsrn(reg + i, &buf[j].v); 1035 } 1036 } else { 1037 for (i = 0; i < nr_vsx_regs; i++) { 1038 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1039 buf[j].v = current->thread.vr_state.vr[reg - 32 + i]; 1040 } 1041 } 1042 } 1043 preempt_enable(); 1044 emulate_vsx_store(op, buf, mem, cross_endian); 1045 return copy_mem_out(mem, ea, size, regs); 1046 } 1047 #endif /* CONFIG_VSX */ 1048 1049 static int __emulate_dcbz(unsigned long ea) 1050 { 1051 unsigned long i; 1052 unsigned long size = l1_dcache_bytes(); 1053 1054 for (i = 0; i < size; i += sizeof(long)) 1055 unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault); 1056 1057 return 0; 1058 1059 Efault: 1060 return -EFAULT; 1061 } 1062 1063 int emulate_dcbz(unsigned long ea, struct pt_regs *regs) 1064 { 1065 int err; 1066 unsigned long size = l1_dcache_bytes(); 1067 1068 ea = truncate_if_32bit(regs->msr, ea); 1069 ea &= ~(size - 1); 1070 if (!address_ok(regs, ea, size)) 1071 return -EFAULT; 1072 1073 if (is_kernel_addr(ea)) { 1074 err = __emulate_dcbz(ea); 1075 } else if (user_write_access_begin((void __user *)ea, size)) { 1076 err = __emulate_dcbz(ea); 1077 user_write_access_end(); 1078 } else { 1079 err = -EFAULT; 1080 } 1081 1082 if (err) 1083 regs->dar = ea; 1084 1085 1086 return err; 1087 } 1088 NOKPROBE_SYMBOL(emulate_dcbz); 1089 1090 #define __put_user_asmx(x, addr, err, op, cr) \ 1091 __asm__ __volatile__( \ 1092 "1: " op " %2,0,%3\n" \ 1093 " mfcr %1\n" \ 1094 "2:\n" \ 1095 ".section .fixup,\"ax\"\n" \ 1096 "3: li %0,%4\n" \ 1097 " b 2b\n" \ 1098 ".previous\n" \ 1099 EX_TABLE(1b, 3b) \ 1100 : "=r" (err), "=r" (cr) \ 1101 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)) 1102 1103 #define __get_user_asmx(x, addr, err, op) \ 1104 __asm__ __volatile__( \ 1105 "1: "op" %1,0,%2\n" \ 1106 "2:\n" \ 1107 ".section .fixup,\"ax\"\n" \ 1108 "3: li %0,%3\n" \ 1109 " b 2b\n" \ 1110 ".previous\n" \ 1111 EX_TABLE(1b, 3b) \ 1112 : "=r" (err), "=r" (x) \ 1113 : "r" (addr), "i" (-EFAULT), "0" (err)) 1114 1115 #define __cacheop_user_asmx(addr, err, op) \ 1116 __asm__ __volatile__( \ 1117 "1: "op" 0,%1\n" \ 1118 "2:\n" \ 1119 ".section .fixup,\"ax\"\n" \ 1120 "3: li %0,%3\n" \ 1121 " b 2b\n" \ 1122 ".previous\n" \ 1123 EX_TABLE(1b, 3b) \ 1124 : "=r" (err) \ 1125 : "r" (addr), "i" (-EFAULT), "0" (err)) 1126 1127 static nokprobe_inline void set_cr0(const struct pt_regs *regs, 1128 struct instruction_op *op) 1129 { 1130 long val = op->val; 1131 1132 op->type |= SETCC; 1133 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); 1134 if (!(regs->msr & MSR_64BIT)) 1135 val = (int) val; 1136 if (val < 0) 1137 op->ccval |= 0x80000000; 1138 else if (val > 0) 1139 op->ccval |= 0x40000000; 1140 else 1141 op->ccval |= 0x20000000; 1142 } 1143 1144 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val) 1145 { 1146 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 1147 if (val) 1148 op->xerval |= XER_CA32; 1149 else 1150 op->xerval &= ~XER_CA32; 1151 } 1152 } 1153 1154 static nokprobe_inline void add_with_carry(const struct pt_regs *regs, 1155 struct instruction_op *op, int rd, 1156 unsigned long val1, unsigned long val2, 1157 unsigned long carry_in) 1158 { 1159 unsigned long val = val1 + val2; 1160 1161 if (carry_in) 1162 ++val; 1163 op->type = COMPUTE + SETREG + SETXER; 1164 op->reg = rd; 1165 op->val = val; 1166 val = truncate_if_32bit(regs->msr, val); 1167 val1 = truncate_if_32bit(regs->msr, val1); 1168 op->xerval = regs->xer; 1169 if (val < val1 || (carry_in && val == val1)) 1170 op->xerval |= XER_CA; 1171 else 1172 op->xerval &= ~XER_CA; 1173 1174 set_ca32(op, (unsigned int)val < (unsigned int)val1 || 1175 (carry_in && (unsigned int)val == (unsigned int)val1)); 1176 } 1177 1178 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs, 1179 struct instruction_op *op, 1180 long v1, long v2, int crfld) 1181 { 1182 unsigned int crval, shift; 1183 1184 op->type = COMPUTE + SETCC; 1185 crval = (regs->xer >> 31) & 1; /* get SO bit */ 1186 if (v1 < v2) 1187 crval |= 8; 1188 else if (v1 > v2) 1189 crval |= 4; 1190 else 1191 crval |= 2; 1192 shift = (7 - crfld) * 4; 1193 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); 1194 } 1195 1196 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs, 1197 struct instruction_op *op, 1198 unsigned long v1, 1199 unsigned long v2, int crfld) 1200 { 1201 unsigned int crval, shift; 1202 1203 op->type = COMPUTE + SETCC; 1204 crval = (regs->xer >> 31) & 1; /* get SO bit */ 1205 if (v1 < v2) 1206 crval |= 8; 1207 else if (v1 > v2) 1208 crval |= 4; 1209 else 1210 crval |= 2; 1211 shift = (7 - crfld) * 4; 1212 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); 1213 } 1214 1215 static nokprobe_inline void do_cmpb(const struct pt_regs *regs, 1216 struct instruction_op *op, 1217 unsigned long v1, unsigned long v2) 1218 { 1219 unsigned long long out_val, mask; 1220 int i; 1221 1222 out_val = 0; 1223 for (i = 0; i < 8; i++) { 1224 mask = 0xffUL << (i * 8); 1225 if ((v1 & mask) == (v2 & mask)) 1226 out_val |= mask; 1227 } 1228 op->val = out_val; 1229 } 1230 1231 /* 1232 * The size parameter is used to adjust the equivalent popcnt instruction. 1233 * popcntb = 8, popcntw = 32, popcntd = 64 1234 */ 1235 static nokprobe_inline void do_popcnt(const struct pt_regs *regs, 1236 struct instruction_op *op, 1237 unsigned long v1, int size) 1238 { 1239 unsigned long long out = v1; 1240 1241 out -= (out >> 1) & 0x5555555555555555ULL; 1242 out = (0x3333333333333333ULL & out) + 1243 (0x3333333333333333ULL & (out >> 2)); 1244 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 1245 1246 if (size == 8) { /* popcntb */ 1247 op->val = out; 1248 return; 1249 } 1250 out += out >> 8; 1251 out += out >> 16; 1252 if (size == 32) { /* popcntw */ 1253 op->val = out & 0x0000003f0000003fULL; 1254 return; 1255 } 1256 1257 out = (out + (out >> 32)) & 0x7f; 1258 op->val = out; /* popcntd */ 1259 } 1260 1261 #ifdef CONFIG_PPC64 1262 static nokprobe_inline void do_bpermd(const struct pt_regs *regs, 1263 struct instruction_op *op, 1264 unsigned long v1, unsigned long v2) 1265 { 1266 unsigned char perm, idx; 1267 unsigned int i; 1268 1269 perm = 0; 1270 for (i = 0; i < 8; i++) { 1271 idx = (v1 >> (i * 8)) & 0xff; 1272 if (idx < 64) 1273 if (v2 & PPC_BIT(idx)) 1274 perm |= 1 << i; 1275 } 1276 op->val = perm; 1277 } 1278 #endif /* CONFIG_PPC64 */ 1279 /* 1280 * The size parameter adjusts the equivalent prty instruction. 1281 * prtyw = 32, prtyd = 64 1282 */ 1283 static nokprobe_inline void do_prty(const struct pt_regs *regs, 1284 struct instruction_op *op, 1285 unsigned long v, int size) 1286 { 1287 unsigned long long res = v ^ (v >> 8); 1288 1289 res ^= res >> 16; 1290 if (size == 32) { /* prtyw */ 1291 op->val = res & 0x0000000100000001ULL; 1292 return; 1293 } 1294 1295 res ^= res >> 32; 1296 op->val = res & 1; /*prtyd */ 1297 } 1298 1299 static nokprobe_inline int trap_compare(long v1, long v2) 1300 { 1301 int ret = 0; 1302 1303 if (v1 < v2) 1304 ret |= 0x10; 1305 else if (v1 > v2) 1306 ret |= 0x08; 1307 else 1308 ret |= 0x04; 1309 if ((unsigned long)v1 < (unsigned long)v2) 1310 ret |= 0x02; 1311 else if ((unsigned long)v1 > (unsigned long)v2) 1312 ret |= 0x01; 1313 return ret; 1314 } 1315 1316 /* 1317 * Elements of 32-bit rotate and mask instructions. 1318 */ 1319 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \ 1320 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb))) 1321 #ifdef __powerpc64__ 1322 #define MASK64_L(mb) (~0UL >> (mb)) 1323 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me)) 1324 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb))) 1325 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32)) 1326 #else 1327 #define DATA32(x) (x) 1328 #endif 1329 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x)) 1330 1331 /* 1332 * Decode an instruction, and return information about it in *op 1333 * without changing *regs. 1334 * Integer arithmetic and logical instructions, branches, and barrier 1335 * instructions can be emulated just using the information in *op. 1336 * 1337 * Return value is 1 if the instruction can be emulated just by 1338 * updating *regs with the information in *op, -1 if we need the 1339 * GPRs but *regs doesn't contain the full register set, or 0 1340 * otherwise. 1341 */ 1342 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, 1343 ppc_inst_t instr) 1344 { 1345 #ifdef CONFIG_PPC64 1346 unsigned int suffixopcode, prefixtype, prefix_r; 1347 #endif 1348 unsigned int opcode, ra, rb, rc, rd, spr, u; 1349 unsigned long int imm; 1350 unsigned long int val, val2; 1351 unsigned int mb, me, sh; 1352 unsigned int word, suffix; 1353 long ival; 1354 1355 word = ppc_inst_val(instr); 1356 suffix = ppc_inst_suffix(instr); 1357 1358 op->type = COMPUTE; 1359 1360 opcode = ppc_inst_primary_opcode(instr); 1361 switch (opcode) { 1362 case 16: /* bc */ 1363 op->type = BRANCH; 1364 imm = (signed short)(word & 0xfffc); 1365 if ((word & 2) == 0) 1366 imm += regs->nip; 1367 op->val = truncate_if_32bit(regs->msr, imm); 1368 if (word & 1) 1369 op->type |= SETLK; 1370 if (branch_taken(word, regs, op)) 1371 op->type |= BRTAKEN; 1372 return 1; 1373 #ifdef CONFIG_PPC64 1374 case 17: /* sc */ 1375 if ((word & 0xfe2) == 2) 1376 op->type = SYSCALL; 1377 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && 1378 (word & 0xfe3) == 1) { /* scv */ 1379 op->type = SYSCALL_VECTORED_0; 1380 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1381 goto unknown_opcode; 1382 } else 1383 op->type = UNKNOWN; 1384 return 0; 1385 #endif 1386 case 18: /* b */ 1387 op->type = BRANCH | BRTAKEN; 1388 imm = word & 0x03fffffc; 1389 if (imm & 0x02000000) 1390 imm -= 0x04000000; 1391 if ((word & 2) == 0) 1392 imm += regs->nip; 1393 op->val = truncate_if_32bit(regs->msr, imm); 1394 if (word & 1) 1395 op->type |= SETLK; 1396 return 1; 1397 case 19: 1398 switch ((word >> 1) & 0x3ff) { 1399 case 0: /* mcrf */ 1400 op->type = COMPUTE + SETCC; 1401 rd = 7 - ((word >> 23) & 0x7); 1402 ra = 7 - ((word >> 18) & 0x7); 1403 rd *= 4; 1404 ra *= 4; 1405 val = (regs->ccr >> ra) & 0xf; 1406 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd); 1407 return 1; 1408 1409 case 16: /* bclr */ 1410 case 528: /* bcctr */ 1411 op->type = BRANCH; 1412 imm = (word & 0x400)? regs->ctr: regs->link; 1413 op->val = truncate_if_32bit(regs->msr, imm); 1414 if (word & 1) 1415 op->type |= SETLK; 1416 if (branch_taken(word, regs, op)) 1417 op->type |= BRTAKEN; 1418 return 1; 1419 1420 case 18: /* rfid, scary */ 1421 if (regs->msr & MSR_PR) 1422 goto priv; 1423 op->type = RFI; 1424 return 0; 1425 1426 case 150: /* isync */ 1427 op->type = BARRIER | BARRIER_ISYNC; 1428 return 1; 1429 1430 case 33: /* crnor */ 1431 case 129: /* crandc */ 1432 case 193: /* crxor */ 1433 case 225: /* crnand */ 1434 case 257: /* crand */ 1435 case 289: /* creqv */ 1436 case 417: /* crorc */ 1437 case 449: /* cror */ 1438 op->type = COMPUTE + SETCC; 1439 ra = (word >> 16) & 0x1f; 1440 rb = (word >> 11) & 0x1f; 1441 rd = (word >> 21) & 0x1f; 1442 ra = (regs->ccr >> (31 - ra)) & 1; 1443 rb = (regs->ccr >> (31 - rb)) & 1; 1444 val = (word >> (6 + ra * 2 + rb)) & 1; 1445 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) | 1446 (val << (31 - rd)); 1447 return 1; 1448 } 1449 break; 1450 case 31: 1451 switch ((word >> 1) & 0x3ff) { 1452 case 598: /* sync */ 1453 op->type = BARRIER + BARRIER_SYNC; 1454 #ifdef __powerpc64__ 1455 switch ((word >> 21) & 3) { 1456 case 1: /* lwsync */ 1457 op->type = BARRIER + BARRIER_LWSYNC; 1458 break; 1459 case 2: /* ptesync */ 1460 op->type = BARRIER + BARRIER_PTESYNC; 1461 break; 1462 } 1463 #endif 1464 return 1; 1465 1466 case 854: /* eieio */ 1467 op->type = BARRIER + BARRIER_EIEIO; 1468 return 1; 1469 } 1470 break; 1471 } 1472 1473 rd = (word >> 21) & 0x1f; 1474 ra = (word >> 16) & 0x1f; 1475 rb = (word >> 11) & 0x1f; 1476 rc = (word >> 6) & 0x1f; 1477 1478 switch (opcode) { 1479 #ifdef __powerpc64__ 1480 case 1: 1481 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 1482 goto unknown_opcode; 1483 1484 prefix_r = GET_PREFIX_R(word); 1485 ra = GET_PREFIX_RA(suffix); 1486 rd = (suffix >> 21) & 0x1f; 1487 op->reg = rd; 1488 op->val = regs->gpr[rd]; 1489 suffixopcode = get_op(suffix); 1490 prefixtype = (word >> 24) & 0x3; 1491 switch (prefixtype) { 1492 case 2: 1493 if (prefix_r && ra) 1494 return 0; 1495 switch (suffixopcode) { 1496 case 14: /* paddi */ 1497 op->type = COMPUTE | PREFIXED; 1498 op->val = mlsd_8lsd_ea(word, suffix, regs); 1499 goto compute_done; 1500 } 1501 } 1502 break; 1503 case 2: /* tdi */ 1504 if (rd & trap_compare(regs->gpr[ra], (short) word)) 1505 goto trap; 1506 return 1; 1507 #endif 1508 case 3: /* twi */ 1509 if (rd & trap_compare((int)regs->gpr[ra], (short) word)) 1510 goto trap; 1511 return 1; 1512 1513 #ifdef __powerpc64__ 1514 case 4: 1515 /* 1516 * There are very many instructions with this primary opcode 1517 * introduced in the ISA as early as v2.03. However, the ones 1518 * we currently emulate were all introduced with ISA 3.0 1519 */ 1520 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1521 goto unknown_opcode; 1522 1523 switch (word & 0x3f) { 1524 case 48: /* maddhd */ 1525 asm volatile(PPC_MADDHD(%0, %1, %2, %3) : 1526 "=r" (op->val) : "r" (regs->gpr[ra]), 1527 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1528 goto compute_done; 1529 1530 case 49: /* maddhdu */ 1531 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) : 1532 "=r" (op->val) : "r" (regs->gpr[ra]), 1533 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1534 goto compute_done; 1535 1536 case 51: /* maddld */ 1537 asm volatile(PPC_MADDLD(%0, %1, %2, %3) : 1538 "=r" (op->val) : "r" (regs->gpr[ra]), 1539 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1540 goto compute_done; 1541 } 1542 1543 /* 1544 * There are other instructions from ISA 3.0 with the same 1545 * primary opcode which do not have emulation support yet. 1546 */ 1547 goto unknown_opcode; 1548 #endif 1549 1550 case 7: /* mulli */ 1551 op->val = regs->gpr[ra] * (short) word; 1552 goto compute_done; 1553 1554 case 8: /* subfic */ 1555 imm = (short) word; 1556 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1); 1557 return 1; 1558 1559 case 10: /* cmpli */ 1560 imm = (unsigned short) word; 1561 val = regs->gpr[ra]; 1562 #ifdef __powerpc64__ 1563 if ((rd & 1) == 0) 1564 val = (unsigned int) val; 1565 #endif 1566 do_cmp_unsigned(regs, op, val, imm, rd >> 2); 1567 return 1; 1568 1569 case 11: /* cmpi */ 1570 imm = (short) word; 1571 val = regs->gpr[ra]; 1572 #ifdef __powerpc64__ 1573 if ((rd & 1) == 0) 1574 val = (int) val; 1575 #endif 1576 do_cmp_signed(regs, op, val, imm, rd >> 2); 1577 return 1; 1578 1579 case 12: /* addic */ 1580 imm = (short) word; 1581 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); 1582 return 1; 1583 1584 case 13: /* addic. */ 1585 imm = (short) word; 1586 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); 1587 set_cr0(regs, op); 1588 return 1; 1589 1590 case 14: /* addi */ 1591 imm = (short) word; 1592 if (ra) 1593 imm += regs->gpr[ra]; 1594 op->val = imm; 1595 goto compute_done; 1596 1597 case 15: /* addis */ 1598 imm = ((short) word) << 16; 1599 if (ra) 1600 imm += regs->gpr[ra]; 1601 op->val = imm; 1602 goto compute_done; 1603 1604 case 19: 1605 if (((word >> 1) & 0x1f) == 2) { 1606 /* addpcis */ 1607 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1608 goto unknown_opcode; 1609 imm = (short) (word & 0xffc1); /* d0 + d2 fields */ 1610 imm |= (word >> 15) & 0x3e; /* d1 field */ 1611 op->val = regs->nip + (imm << 16) + 4; 1612 goto compute_done; 1613 } 1614 op->type = UNKNOWN; 1615 return 0; 1616 1617 case 20: /* rlwimi */ 1618 mb = (word >> 6) & 0x1f; 1619 me = (word >> 1) & 0x1f; 1620 val = DATA32(regs->gpr[rd]); 1621 imm = MASK32(mb, me); 1622 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); 1623 goto logical_done; 1624 1625 case 21: /* rlwinm */ 1626 mb = (word >> 6) & 0x1f; 1627 me = (word >> 1) & 0x1f; 1628 val = DATA32(regs->gpr[rd]); 1629 op->val = ROTATE(val, rb) & MASK32(mb, me); 1630 goto logical_done; 1631 1632 case 23: /* rlwnm */ 1633 mb = (word >> 6) & 0x1f; 1634 me = (word >> 1) & 0x1f; 1635 rb = regs->gpr[rb] & 0x1f; 1636 val = DATA32(regs->gpr[rd]); 1637 op->val = ROTATE(val, rb) & MASK32(mb, me); 1638 goto logical_done; 1639 1640 case 24: /* ori */ 1641 op->val = regs->gpr[rd] | (unsigned short) word; 1642 goto logical_done_nocc; 1643 1644 case 25: /* oris */ 1645 imm = (unsigned short) word; 1646 op->val = regs->gpr[rd] | (imm << 16); 1647 goto logical_done_nocc; 1648 1649 case 26: /* xori */ 1650 op->val = regs->gpr[rd] ^ (unsigned short) word; 1651 goto logical_done_nocc; 1652 1653 case 27: /* xoris */ 1654 imm = (unsigned short) word; 1655 op->val = regs->gpr[rd] ^ (imm << 16); 1656 goto logical_done_nocc; 1657 1658 case 28: /* andi. */ 1659 op->val = regs->gpr[rd] & (unsigned short) word; 1660 set_cr0(regs, op); 1661 goto logical_done_nocc; 1662 1663 case 29: /* andis. */ 1664 imm = (unsigned short) word; 1665 op->val = regs->gpr[rd] & (imm << 16); 1666 set_cr0(regs, op); 1667 goto logical_done_nocc; 1668 1669 #ifdef __powerpc64__ 1670 case 30: /* rld* */ 1671 mb = ((word >> 6) & 0x1f) | (word & 0x20); 1672 val = regs->gpr[rd]; 1673 if ((word & 0x10) == 0) { 1674 sh = rb | ((word & 2) << 4); 1675 val = ROTATE(val, sh); 1676 switch ((word >> 2) & 3) { 1677 case 0: /* rldicl */ 1678 val &= MASK64_L(mb); 1679 break; 1680 case 1: /* rldicr */ 1681 val &= MASK64_R(mb); 1682 break; 1683 case 2: /* rldic */ 1684 val &= MASK64(mb, 63 - sh); 1685 break; 1686 case 3: /* rldimi */ 1687 imm = MASK64(mb, 63 - sh); 1688 val = (regs->gpr[ra] & ~imm) | 1689 (val & imm); 1690 } 1691 op->val = val; 1692 goto logical_done; 1693 } else { 1694 sh = regs->gpr[rb] & 0x3f; 1695 val = ROTATE(val, sh); 1696 switch ((word >> 1) & 7) { 1697 case 0: /* rldcl */ 1698 op->val = val & MASK64_L(mb); 1699 goto logical_done; 1700 case 1: /* rldcr */ 1701 op->val = val & MASK64_R(mb); 1702 goto logical_done; 1703 } 1704 } 1705 #endif 1706 op->type = UNKNOWN; /* illegal instruction */ 1707 return 0; 1708 1709 case 31: 1710 /* isel occupies 32 minor opcodes */ 1711 if (((word >> 1) & 0x1f) == 15) { 1712 mb = (word >> 6) & 0x1f; /* bc field */ 1713 val = (regs->ccr >> (31 - mb)) & 1; 1714 val2 = (ra) ? regs->gpr[ra] : 0; 1715 1716 op->val = (val) ? val2 : regs->gpr[rb]; 1717 goto compute_done; 1718 } 1719 1720 switch ((word >> 1) & 0x3ff) { 1721 case 4: /* tw */ 1722 if (rd == 0x1f || 1723 (rd & trap_compare((int)regs->gpr[ra], 1724 (int)regs->gpr[rb]))) 1725 goto trap; 1726 return 1; 1727 #ifdef __powerpc64__ 1728 case 68: /* td */ 1729 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb])) 1730 goto trap; 1731 return 1; 1732 #endif 1733 case 83: /* mfmsr */ 1734 if (regs->msr & MSR_PR) 1735 goto priv; 1736 op->type = MFMSR; 1737 op->reg = rd; 1738 return 0; 1739 case 146: /* mtmsr */ 1740 if (regs->msr & MSR_PR) 1741 goto priv; 1742 op->type = MTMSR; 1743 op->reg = rd; 1744 op->val = 0xffffffff & ~(MSR_ME | MSR_LE); 1745 return 0; 1746 #ifdef CONFIG_PPC64 1747 case 178: /* mtmsrd */ 1748 if (regs->msr & MSR_PR) 1749 goto priv; 1750 op->type = MTMSR; 1751 op->reg = rd; 1752 /* only MSR_EE and MSR_RI get changed if bit 15 set */ 1753 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */ 1754 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL; 1755 op->val = imm; 1756 return 0; 1757 #endif 1758 1759 case 19: /* mfcr */ 1760 imm = 0xffffffffUL; 1761 if ((word >> 20) & 1) { 1762 imm = 0xf0000000UL; 1763 for (sh = 0; sh < 8; ++sh) { 1764 if (word & (0x80000 >> sh)) 1765 break; 1766 imm >>= 4; 1767 } 1768 } 1769 op->val = regs->ccr & imm; 1770 goto compute_done; 1771 1772 case 128: /* setb */ 1773 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1774 goto unknown_opcode; 1775 /* 1776 * 'ra' encodes the CR field number (bfa) in the top 3 bits. 1777 * Since each CR field is 4 bits, 1778 * we can simply mask off the bottom two bits (bfa * 4) 1779 * to yield the first bit in the CR field. 1780 */ 1781 ra = ra & ~0x3; 1782 /* 'val' stores bits of the CR field (bfa) */ 1783 val = regs->ccr >> (CR0_SHIFT - ra); 1784 /* checks if the LT bit of CR field (bfa) is set */ 1785 if (val & 8) 1786 op->val = -1; 1787 /* checks if the GT bit of CR field (bfa) is set */ 1788 else if (val & 4) 1789 op->val = 1; 1790 else 1791 op->val = 0; 1792 goto compute_done; 1793 1794 case 144: /* mtcrf */ 1795 op->type = COMPUTE + SETCC; 1796 imm = 0xf0000000UL; 1797 val = regs->gpr[rd]; 1798 op->ccval = regs->ccr; 1799 for (sh = 0; sh < 8; ++sh) { 1800 if (word & (0x80000 >> sh)) 1801 op->ccval = (op->ccval & ~imm) | 1802 (val & imm); 1803 imm >>= 4; 1804 } 1805 return 1; 1806 1807 case 339: /* mfspr */ 1808 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); 1809 op->type = MFSPR; 1810 op->reg = rd; 1811 op->spr = spr; 1812 if (spr == SPRN_XER || spr == SPRN_LR || 1813 spr == SPRN_CTR) 1814 return 1; 1815 return 0; 1816 1817 case 467: /* mtspr */ 1818 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); 1819 op->type = MTSPR; 1820 op->val = regs->gpr[rd]; 1821 op->spr = spr; 1822 if (spr == SPRN_XER || spr == SPRN_LR || 1823 spr == SPRN_CTR) 1824 return 1; 1825 return 0; 1826 1827 /* 1828 * Compare instructions 1829 */ 1830 case 0: /* cmp */ 1831 val = regs->gpr[ra]; 1832 val2 = regs->gpr[rb]; 1833 #ifdef __powerpc64__ 1834 if ((rd & 1) == 0) { 1835 /* word (32-bit) compare */ 1836 val = (int) val; 1837 val2 = (int) val2; 1838 } 1839 #endif 1840 do_cmp_signed(regs, op, val, val2, rd >> 2); 1841 return 1; 1842 1843 case 32: /* cmpl */ 1844 val = regs->gpr[ra]; 1845 val2 = regs->gpr[rb]; 1846 #ifdef __powerpc64__ 1847 if ((rd & 1) == 0) { 1848 /* word (32-bit) compare */ 1849 val = (unsigned int) val; 1850 val2 = (unsigned int) val2; 1851 } 1852 #endif 1853 do_cmp_unsigned(regs, op, val, val2, rd >> 2); 1854 return 1; 1855 1856 case 508: /* cmpb */ 1857 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]); 1858 goto logical_done_nocc; 1859 1860 /* 1861 * Arithmetic instructions 1862 */ 1863 case 8: /* subfc */ 1864 add_with_carry(regs, op, rd, ~regs->gpr[ra], 1865 regs->gpr[rb], 1); 1866 goto arith_done; 1867 #ifdef __powerpc64__ 1868 case 9: /* mulhdu */ 1869 asm("mulhdu %0,%1,%2" : "=r" (op->val) : 1870 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1871 goto arith_done; 1872 #endif 1873 case 10: /* addc */ 1874 add_with_carry(regs, op, rd, regs->gpr[ra], 1875 regs->gpr[rb], 0); 1876 goto arith_done; 1877 1878 case 11: /* mulhwu */ 1879 asm("mulhwu %0,%1,%2" : "=r" (op->val) : 1880 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1881 goto arith_done; 1882 1883 case 40: /* subf */ 1884 op->val = regs->gpr[rb] - regs->gpr[ra]; 1885 goto arith_done; 1886 #ifdef __powerpc64__ 1887 case 73: /* mulhd */ 1888 asm("mulhd %0,%1,%2" : "=r" (op->val) : 1889 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1890 goto arith_done; 1891 #endif 1892 case 75: /* mulhw */ 1893 asm("mulhw %0,%1,%2" : "=r" (op->val) : 1894 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1895 goto arith_done; 1896 1897 case 104: /* neg */ 1898 op->val = -regs->gpr[ra]; 1899 goto arith_done; 1900 1901 case 136: /* subfe */ 1902 add_with_carry(regs, op, rd, ~regs->gpr[ra], 1903 regs->gpr[rb], regs->xer & XER_CA); 1904 goto arith_done; 1905 1906 case 138: /* adde */ 1907 add_with_carry(regs, op, rd, regs->gpr[ra], 1908 regs->gpr[rb], regs->xer & XER_CA); 1909 goto arith_done; 1910 1911 case 200: /* subfze */ 1912 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L, 1913 regs->xer & XER_CA); 1914 goto arith_done; 1915 1916 case 202: /* addze */ 1917 add_with_carry(regs, op, rd, regs->gpr[ra], 0L, 1918 regs->xer & XER_CA); 1919 goto arith_done; 1920 1921 case 232: /* subfme */ 1922 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L, 1923 regs->xer & XER_CA); 1924 goto arith_done; 1925 #ifdef __powerpc64__ 1926 case 233: /* mulld */ 1927 op->val = regs->gpr[ra] * regs->gpr[rb]; 1928 goto arith_done; 1929 #endif 1930 case 234: /* addme */ 1931 add_with_carry(regs, op, rd, regs->gpr[ra], -1L, 1932 regs->xer & XER_CA); 1933 goto arith_done; 1934 1935 case 235: /* mullw */ 1936 op->val = (long)(int) regs->gpr[ra] * 1937 (int) regs->gpr[rb]; 1938 1939 goto arith_done; 1940 #ifdef __powerpc64__ 1941 case 265: /* modud */ 1942 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1943 goto unknown_opcode; 1944 op->val = regs->gpr[ra] % regs->gpr[rb]; 1945 goto compute_done; 1946 #endif 1947 case 266: /* add */ 1948 op->val = regs->gpr[ra] + regs->gpr[rb]; 1949 goto arith_done; 1950 1951 case 267: /* moduw */ 1952 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1953 goto unknown_opcode; 1954 op->val = (unsigned int) regs->gpr[ra] % 1955 (unsigned int) regs->gpr[rb]; 1956 goto compute_done; 1957 #ifdef __powerpc64__ 1958 case 457: /* divdu */ 1959 op->val = regs->gpr[ra] / regs->gpr[rb]; 1960 goto arith_done; 1961 #endif 1962 case 459: /* divwu */ 1963 op->val = (unsigned int) regs->gpr[ra] / 1964 (unsigned int) regs->gpr[rb]; 1965 goto arith_done; 1966 #ifdef __powerpc64__ 1967 case 489: /* divd */ 1968 op->val = (long int) regs->gpr[ra] / 1969 (long int) regs->gpr[rb]; 1970 goto arith_done; 1971 #endif 1972 case 491: /* divw */ 1973 op->val = (int) regs->gpr[ra] / 1974 (int) regs->gpr[rb]; 1975 goto arith_done; 1976 #ifdef __powerpc64__ 1977 case 425: /* divde[.] */ 1978 asm volatile(PPC_DIVDE(%0, %1, %2) : 1979 "=r" (op->val) : "r" (regs->gpr[ra]), 1980 "r" (regs->gpr[rb])); 1981 goto arith_done; 1982 case 393: /* divdeu[.] */ 1983 asm volatile(PPC_DIVDEU(%0, %1, %2) : 1984 "=r" (op->val) : "r" (regs->gpr[ra]), 1985 "r" (regs->gpr[rb])); 1986 goto arith_done; 1987 #endif 1988 case 755: /* darn */ 1989 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1990 goto unknown_opcode; 1991 switch (ra & 0x3) { 1992 case 0: 1993 /* 32-bit conditioned */ 1994 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val)); 1995 goto compute_done; 1996 1997 case 1: 1998 /* 64-bit conditioned */ 1999 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val)); 2000 goto compute_done; 2001 2002 case 2: 2003 /* 64-bit raw */ 2004 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val)); 2005 goto compute_done; 2006 } 2007 2008 goto unknown_opcode; 2009 #ifdef __powerpc64__ 2010 case 777: /* modsd */ 2011 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2012 goto unknown_opcode; 2013 op->val = (long int) regs->gpr[ra] % 2014 (long int) regs->gpr[rb]; 2015 goto compute_done; 2016 #endif 2017 case 779: /* modsw */ 2018 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2019 goto unknown_opcode; 2020 op->val = (int) regs->gpr[ra] % 2021 (int) regs->gpr[rb]; 2022 goto compute_done; 2023 2024 2025 /* 2026 * Logical instructions 2027 */ 2028 case 26: /* cntlzw */ 2029 val = (unsigned int) regs->gpr[rd]; 2030 op->val = ( val ? __builtin_clz(val) : 32 ); 2031 goto logical_done; 2032 #ifdef __powerpc64__ 2033 case 58: /* cntlzd */ 2034 val = regs->gpr[rd]; 2035 op->val = ( val ? __builtin_clzl(val) : 64 ); 2036 goto logical_done; 2037 #endif 2038 case 28: /* and */ 2039 op->val = regs->gpr[rd] & regs->gpr[rb]; 2040 goto logical_done; 2041 2042 case 60: /* andc */ 2043 op->val = regs->gpr[rd] & ~regs->gpr[rb]; 2044 goto logical_done; 2045 2046 case 122: /* popcntb */ 2047 do_popcnt(regs, op, regs->gpr[rd], 8); 2048 goto logical_done_nocc; 2049 2050 case 124: /* nor */ 2051 op->val = ~(regs->gpr[rd] | regs->gpr[rb]); 2052 goto logical_done; 2053 2054 case 154: /* prtyw */ 2055 do_prty(regs, op, regs->gpr[rd], 32); 2056 goto logical_done_nocc; 2057 2058 case 186: /* prtyd */ 2059 do_prty(regs, op, regs->gpr[rd], 64); 2060 goto logical_done_nocc; 2061 #ifdef CONFIG_PPC64 2062 case 252: /* bpermd */ 2063 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]); 2064 goto logical_done_nocc; 2065 #endif 2066 case 284: /* xor */ 2067 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]); 2068 goto logical_done; 2069 2070 case 316: /* xor */ 2071 op->val = regs->gpr[rd] ^ regs->gpr[rb]; 2072 goto logical_done; 2073 2074 case 378: /* popcntw */ 2075 do_popcnt(regs, op, regs->gpr[rd], 32); 2076 goto logical_done_nocc; 2077 2078 case 412: /* orc */ 2079 op->val = regs->gpr[rd] | ~regs->gpr[rb]; 2080 goto logical_done; 2081 2082 case 444: /* or */ 2083 op->val = regs->gpr[rd] | regs->gpr[rb]; 2084 goto logical_done; 2085 2086 case 476: /* nand */ 2087 op->val = ~(regs->gpr[rd] & regs->gpr[rb]); 2088 goto logical_done; 2089 #ifdef CONFIG_PPC64 2090 case 506: /* popcntd */ 2091 do_popcnt(regs, op, regs->gpr[rd], 64); 2092 goto logical_done_nocc; 2093 #endif 2094 case 538: /* cnttzw */ 2095 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2096 goto unknown_opcode; 2097 val = (unsigned int) regs->gpr[rd]; 2098 op->val = (val ? __builtin_ctz(val) : 32); 2099 goto logical_done; 2100 #ifdef __powerpc64__ 2101 case 570: /* cnttzd */ 2102 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2103 goto unknown_opcode; 2104 val = regs->gpr[rd]; 2105 op->val = (val ? __builtin_ctzl(val) : 64); 2106 goto logical_done; 2107 #endif 2108 case 922: /* extsh */ 2109 op->val = (signed short) regs->gpr[rd]; 2110 goto logical_done; 2111 2112 case 954: /* extsb */ 2113 op->val = (signed char) regs->gpr[rd]; 2114 goto logical_done; 2115 #ifdef __powerpc64__ 2116 case 986: /* extsw */ 2117 op->val = (signed int) regs->gpr[rd]; 2118 goto logical_done; 2119 #endif 2120 2121 /* 2122 * Shift instructions 2123 */ 2124 case 24: /* slw */ 2125 sh = regs->gpr[rb] & 0x3f; 2126 if (sh < 32) 2127 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL; 2128 else 2129 op->val = 0; 2130 goto logical_done; 2131 2132 case 536: /* srw */ 2133 sh = regs->gpr[rb] & 0x3f; 2134 if (sh < 32) 2135 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh; 2136 else 2137 op->val = 0; 2138 goto logical_done; 2139 2140 case 792: /* sraw */ 2141 op->type = COMPUTE + SETREG + SETXER; 2142 sh = regs->gpr[rb] & 0x3f; 2143 ival = (signed int) regs->gpr[rd]; 2144 op->val = ival >> (sh < 32 ? sh : 31); 2145 op->xerval = regs->xer; 2146 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) 2147 op->xerval |= XER_CA; 2148 else 2149 op->xerval &= ~XER_CA; 2150 set_ca32(op, op->xerval & XER_CA); 2151 goto logical_done; 2152 2153 case 824: /* srawi */ 2154 op->type = COMPUTE + SETREG + SETXER; 2155 sh = rb; 2156 ival = (signed int) regs->gpr[rd]; 2157 op->val = ival >> sh; 2158 op->xerval = regs->xer; 2159 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) 2160 op->xerval |= XER_CA; 2161 else 2162 op->xerval &= ~XER_CA; 2163 set_ca32(op, op->xerval & XER_CA); 2164 goto logical_done; 2165 2166 #ifdef __powerpc64__ 2167 case 27: /* sld */ 2168 sh = regs->gpr[rb] & 0x7f; 2169 if (sh < 64) 2170 op->val = regs->gpr[rd] << sh; 2171 else 2172 op->val = 0; 2173 goto logical_done; 2174 2175 case 539: /* srd */ 2176 sh = regs->gpr[rb] & 0x7f; 2177 if (sh < 64) 2178 op->val = regs->gpr[rd] >> sh; 2179 else 2180 op->val = 0; 2181 goto logical_done; 2182 2183 case 794: /* srad */ 2184 op->type = COMPUTE + SETREG + SETXER; 2185 sh = regs->gpr[rb] & 0x7f; 2186 ival = (signed long int) regs->gpr[rd]; 2187 op->val = ival >> (sh < 64 ? sh : 63); 2188 op->xerval = regs->xer; 2189 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) 2190 op->xerval |= XER_CA; 2191 else 2192 op->xerval &= ~XER_CA; 2193 set_ca32(op, op->xerval & XER_CA); 2194 goto logical_done; 2195 2196 case 826: /* sradi with sh_5 = 0 */ 2197 case 827: /* sradi with sh_5 = 1 */ 2198 op->type = COMPUTE + SETREG + SETXER; 2199 sh = rb | ((word & 2) << 4); 2200 ival = (signed long int) regs->gpr[rd]; 2201 op->val = ival >> sh; 2202 op->xerval = regs->xer; 2203 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) 2204 op->xerval |= XER_CA; 2205 else 2206 op->xerval &= ~XER_CA; 2207 set_ca32(op, op->xerval & XER_CA); 2208 goto logical_done; 2209 2210 case 890: /* extswsli with sh_5 = 0 */ 2211 case 891: /* extswsli with sh_5 = 1 */ 2212 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2213 goto unknown_opcode; 2214 op->type = COMPUTE + SETREG; 2215 sh = rb | ((word & 2) << 4); 2216 val = (signed int) regs->gpr[rd]; 2217 if (sh) 2218 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh); 2219 else 2220 op->val = val; 2221 goto logical_done; 2222 2223 #endif /* __powerpc64__ */ 2224 2225 /* 2226 * Cache instructions 2227 */ 2228 case 54: /* dcbst */ 2229 op->type = MKOP(CACHEOP, DCBST, 0); 2230 op->ea = xform_ea(word, regs); 2231 return 0; 2232 2233 case 86: /* dcbf */ 2234 op->type = MKOP(CACHEOP, DCBF, 0); 2235 op->ea = xform_ea(word, regs); 2236 return 0; 2237 2238 case 246: /* dcbtst */ 2239 op->type = MKOP(CACHEOP, DCBTST, 0); 2240 op->ea = xform_ea(word, regs); 2241 op->reg = rd; 2242 return 0; 2243 2244 case 278: /* dcbt */ 2245 op->type = MKOP(CACHEOP, DCBTST, 0); 2246 op->ea = xform_ea(word, regs); 2247 op->reg = rd; 2248 return 0; 2249 2250 case 982: /* icbi */ 2251 op->type = MKOP(CACHEOP, ICBI, 0); 2252 op->ea = xform_ea(word, regs); 2253 return 0; 2254 2255 case 1014: /* dcbz */ 2256 op->type = MKOP(CACHEOP, DCBZ, 0); 2257 op->ea = xform_ea(word, regs); 2258 return 0; 2259 } 2260 break; 2261 } 2262 2263 /* 2264 * Loads and stores. 2265 */ 2266 op->type = UNKNOWN; 2267 op->update_reg = ra; 2268 op->reg = rd; 2269 op->val = regs->gpr[rd]; 2270 u = (word >> 20) & UPDATE; 2271 op->vsx_flags = 0; 2272 2273 switch (opcode) { 2274 case 31: 2275 u = word & UPDATE; 2276 op->ea = xform_ea(word, regs); 2277 switch ((word >> 1) & 0x3ff) { 2278 case 20: /* lwarx */ 2279 op->type = MKOP(LARX, 0, 4); 2280 break; 2281 2282 case 150: /* stwcx. */ 2283 op->type = MKOP(STCX, 0, 4); 2284 break; 2285 2286 #ifdef __powerpc64__ 2287 case 84: /* ldarx */ 2288 op->type = MKOP(LARX, 0, 8); 2289 break; 2290 2291 case 214: /* stdcx. */ 2292 op->type = MKOP(STCX, 0, 8); 2293 break; 2294 2295 case 52: /* lbarx */ 2296 op->type = MKOP(LARX, 0, 1); 2297 break; 2298 2299 case 694: /* stbcx. */ 2300 op->type = MKOP(STCX, 0, 1); 2301 break; 2302 2303 case 116: /* lharx */ 2304 op->type = MKOP(LARX, 0, 2); 2305 break; 2306 2307 case 726: /* sthcx. */ 2308 op->type = MKOP(STCX, 0, 2); 2309 break; 2310 2311 case 276: /* lqarx */ 2312 if (!((rd & 1) || rd == ra || rd == rb)) 2313 op->type = MKOP(LARX, 0, 16); 2314 break; 2315 2316 case 182: /* stqcx. */ 2317 if (!(rd & 1)) 2318 op->type = MKOP(STCX, 0, 16); 2319 break; 2320 #endif 2321 2322 case 23: /* lwzx */ 2323 case 55: /* lwzux */ 2324 op->type = MKOP(LOAD, u, 4); 2325 break; 2326 2327 case 87: /* lbzx */ 2328 case 119: /* lbzux */ 2329 op->type = MKOP(LOAD, u, 1); 2330 break; 2331 2332 #ifdef CONFIG_ALTIVEC 2333 /* 2334 * Note: for the load/store vector element instructions, 2335 * bits of the EA say which field of the VMX register to use. 2336 */ 2337 case 7: /* lvebx */ 2338 op->type = MKOP(LOAD_VMX, 0, 1); 2339 op->element_size = 1; 2340 break; 2341 2342 case 39: /* lvehx */ 2343 op->type = MKOP(LOAD_VMX, 0, 2); 2344 op->element_size = 2; 2345 break; 2346 2347 case 71: /* lvewx */ 2348 op->type = MKOP(LOAD_VMX, 0, 4); 2349 op->element_size = 4; 2350 break; 2351 2352 case 103: /* lvx */ 2353 case 359: /* lvxl */ 2354 op->type = MKOP(LOAD_VMX, 0, 16); 2355 op->element_size = 16; 2356 break; 2357 2358 case 135: /* stvebx */ 2359 op->type = MKOP(STORE_VMX, 0, 1); 2360 op->element_size = 1; 2361 break; 2362 2363 case 167: /* stvehx */ 2364 op->type = MKOP(STORE_VMX, 0, 2); 2365 op->element_size = 2; 2366 break; 2367 2368 case 199: /* stvewx */ 2369 op->type = MKOP(STORE_VMX, 0, 4); 2370 op->element_size = 4; 2371 break; 2372 2373 case 231: /* stvx */ 2374 case 487: /* stvxl */ 2375 op->type = MKOP(STORE_VMX, 0, 16); 2376 break; 2377 #endif /* CONFIG_ALTIVEC */ 2378 2379 #ifdef __powerpc64__ 2380 case 21: /* ldx */ 2381 case 53: /* ldux */ 2382 op->type = MKOP(LOAD, u, 8); 2383 break; 2384 2385 case 149: /* stdx */ 2386 case 181: /* stdux */ 2387 op->type = MKOP(STORE, u, 8); 2388 break; 2389 #endif 2390 2391 case 151: /* stwx */ 2392 case 183: /* stwux */ 2393 op->type = MKOP(STORE, u, 4); 2394 break; 2395 2396 case 215: /* stbx */ 2397 case 247: /* stbux */ 2398 op->type = MKOP(STORE, u, 1); 2399 break; 2400 2401 case 279: /* lhzx */ 2402 case 311: /* lhzux */ 2403 op->type = MKOP(LOAD, u, 2); 2404 break; 2405 2406 #ifdef __powerpc64__ 2407 case 341: /* lwax */ 2408 case 373: /* lwaux */ 2409 op->type = MKOP(LOAD, SIGNEXT | u, 4); 2410 break; 2411 #endif 2412 2413 case 343: /* lhax */ 2414 case 375: /* lhaux */ 2415 op->type = MKOP(LOAD, SIGNEXT | u, 2); 2416 break; 2417 2418 case 407: /* sthx */ 2419 case 439: /* sthux */ 2420 op->type = MKOP(STORE, u, 2); 2421 break; 2422 2423 #ifdef __powerpc64__ 2424 case 532: /* ldbrx */ 2425 op->type = MKOP(LOAD, BYTEREV, 8); 2426 break; 2427 2428 #endif 2429 case 533: /* lswx */ 2430 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f); 2431 break; 2432 2433 case 534: /* lwbrx */ 2434 op->type = MKOP(LOAD, BYTEREV, 4); 2435 break; 2436 2437 case 597: /* lswi */ 2438 if (rb == 0) 2439 rb = 32; /* # bytes to load */ 2440 op->type = MKOP(LOAD_MULTI, 0, rb); 2441 op->ea = ra ? regs->gpr[ra] : 0; 2442 break; 2443 2444 #ifdef CONFIG_PPC_FPU 2445 case 535: /* lfsx */ 2446 case 567: /* lfsux */ 2447 op->type = MKOP(LOAD_FP, u | FPCONV, 4); 2448 break; 2449 2450 case 599: /* lfdx */ 2451 case 631: /* lfdux */ 2452 op->type = MKOP(LOAD_FP, u, 8); 2453 break; 2454 2455 case 663: /* stfsx */ 2456 case 695: /* stfsux */ 2457 op->type = MKOP(STORE_FP, u | FPCONV, 4); 2458 break; 2459 2460 case 727: /* stfdx */ 2461 case 759: /* stfdux */ 2462 op->type = MKOP(STORE_FP, u, 8); 2463 break; 2464 2465 #ifdef __powerpc64__ 2466 case 791: /* lfdpx */ 2467 op->type = MKOP(LOAD_FP, 0, 16); 2468 break; 2469 2470 case 855: /* lfiwax */ 2471 op->type = MKOP(LOAD_FP, SIGNEXT, 4); 2472 break; 2473 2474 case 887: /* lfiwzx */ 2475 op->type = MKOP(LOAD_FP, 0, 4); 2476 break; 2477 2478 case 919: /* stfdpx */ 2479 op->type = MKOP(STORE_FP, 0, 16); 2480 break; 2481 2482 case 983: /* stfiwx */ 2483 op->type = MKOP(STORE_FP, 0, 4); 2484 break; 2485 #endif /* __powerpc64 */ 2486 #endif /* CONFIG_PPC_FPU */ 2487 2488 #ifdef __powerpc64__ 2489 case 660: /* stdbrx */ 2490 op->type = MKOP(STORE, BYTEREV, 8); 2491 op->val = byterev_8(regs->gpr[rd]); 2492 break; 2493 2494 #endif 2495 case 661: /* stswx */ 2496 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f); 2497 break; 2498 2499 case 662: /* stwbrx */ 2500 op->type = MKOP(STORE, BYTEREV, 4); 2501 op->val = byterev_4(regs->gpr[rd]); 2502 break; 2503 2504 case 725: /* stswi */ 2505 if (rb == 0) 2506 rb = 32; /* # bytes to store */ 2507 op->type = MKOP(STORE_MULTI, 0, rb); 2508 op->ea = ra ? regs->gpr[ra] : 0; 2509 break; 2510 2511 case 790: /* lhbrx */ 2512 op->type = MKOP(LOAD, BYTEREV, 2); 2513 break; 2514 2515 case 918: /* sthbrx */ 2516 op->type = MKOP(STORE, BYTEREV, 2); 2517 op->val = byterev_2(regs->gpr[rd]); 2518 break; 2519 2520 #ifdef CONFIG_VSX 2521 case 12: /* lxsiwzx */ 2522 op->reg = rd | ((word & 1) << 5); 2523 op->type = MKOP(LOAD_VSX, 0, 4); 2524 op->element_size = 8; 2525 break; 2526 2527 case 76: /* lxsiwax */ 2528 op->reg = rd | ((word & 1) << 5); 2529 op->type = MKOP(LOAD_VSX, SIGNEXT, 4); 2530 op->element_size = 8; 2531 break; 2532 2533 case 140: /* stxsiwx */ 2534 op->reg = rd | ((word & 1) << 5); 2535 op->type = MKOP(STORE_VSX, 0, 4); 2536 op->element_size = 8; 2537 break; 2538 2539 case 268: /* lxvx */ 2540 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2541 goto unknown_opcode; 2542 op->reg = rd | ((word & 1) << 5); 2543 op->type = MKOP(LOAD_VSX, 0, 16); 2544 op->element_size = 16; 2545 op->vsx_flags = VSX_CHECK_VEC; 2546 break; 2547 2548 case 269: /* lxvl */ 2549 case 301: { /* lxvll */ 2550 int nb; 2551 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2552 goto unknown_opcode; 2553 op->reg = rd | ((word & 1) << 5); 2554 op->ea = ra ? regs->gpr[ra] : 0; 2555 nb = regs->gpr[rb] & 0xff; 2556 if (nb > 16) 2557 nb = 16; 2558 op->type = MKOP(LOAD_VSX, 0, nb); 2559 op->element_size = 16; 2560 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | 2561 VSX_CHECK_VEC; 2562 break; 2563 } 2564 case 332: /* lxvdsx */ 2565 op->reg = rd | ((word & 1) << 5); 2566 op->type = MKOP(LOAD_VSX, 0, 8); 2567 op->element_size = 8; 2568 op->vsx_flags = VSX_SPLAT; 2569 break; 2570 2571 case 333: /* lxvpx */ 2572 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2573 goto unknown_opcode; 2574 op->reg = VSX_REGISTER_XTP(rd); 2575 op->type = MKOP(LOAD_VSX, 0, 32); 2576 op->element_size = 32; 2577 break; 2578 2579 case 364: /* lxvwsx */ 2580 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2581 goto unknown_opcode; 2582 op->reg = rd | ((word & 1) << 5); 2583 op->type = MKOP(LOAD_VSX, 0, 4); 2584 op->element_size = 4; 2585 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC; 2586 break; 2587 2588 case 396: /* stxvx */ 2589 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2590 goto unknown_opcode; 2591 op->reg = rd | ((word & 1) << 5); 2592 op->type = MKOP(STORE_VSX, 0, 16); 2593 op->element_size = 16; 2594 op->vsx_flags = VSX_CHECK_VEC; 2595 break; 2596 2597 case 397: /* stxvl */ 2598 case 429: { /* stxvll */ 2599 int nb; 2600 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2601 goto unknown_opcode; 2602 op->reg = rd | ((word & 1) << 5); 2603 op->ea = ra ? regs->gpr[ra] : 0; 2604 nb = regs->gpr[rb] & 0xff; 2605 if (nb > 16) 2606 nb = 16; 2607 op->type = MKOP(STORE_VSX, 0, nb); 2608 op->element_size = 16; 2609 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | 2610 VSX_CHECK_VEC; 2611 break; 2612 } 2613 case 461: /* stxvpx */ 2614 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2615 goto unknown_opcode; 2616 op->reg = VSX_REGISTER_XTP(rd); 2617 op->type = MKOP(STORE_VSX, 0, 32); 2618 op->element_size = 32; 2619 break; 2620 case 524: /* lxsspx */ 2621 op->reg = rd | ((word & 1) << 5); 2622 op->type = MKOP(LOAD_VSX, 0, 4); 2623 op->element_size = 8; 2624 op->vsx_flags = VSX_FPCONV; 2625 break; 2626 2627 case 588: /* lxsdx */ 2628 op->reg = rd | ((word & 1) << 5); 2629 op->type = MKOP(LOAD_VSX, 0, 8); 2630 op->element_size = 8; 2631 break; 2632 2633 case 652: /* stxsspx */ 2634 op->reg = rd | ((word & 1) << 5); 2635 op->type = MKOP(STORE_VSX, 0, 4); 2636 op->element_size = 8; 2637 op->vsx_flags = VSX_FPCONV; 2638 break; 2639 2640 case 716: /* stxsdx */ 2641 op->reg = rd | ((word & 1) << 5); 2642 op->type = MKOP(STORE_VSX, 0, 8); 2643 op->element_size = 8; 2644 break; 2645 2646 case 780: /* lxvw4x */ 2647 op->reg = rd | ((word & 1) << 5); 2648 op->type = MKOP(LOAD_VSX, 0, 16); 2649 op->element_size = 4; 2650 break; 2651 2652 case 781: /* lxsibzx */ 2653 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2654 goto unknown_opcode; 2655 op->reg = rd | ((word & 1) << 5); 2656 op->type = MKOP(LOAD_VSX, 0, 1); 2657 op->element_size = 8; 2658 op->vsx_flags = VSX_CHECK_VEC; 2659 break; 2660 2661 case 812: /* lxvh8x */ 2662 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2663 goto unknown_opcode; 2664 op->reg = rd | ((word & 1) << 5); 2665 op->type = MKOP(LOAD_VSX, 0, 16); 2666 op->element_size = 2; 2667 op->vsx_flags = VSX_CHECK_VEC; 2668 break; 2669 2670 case 813: /* lxsihzx */ 2671 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2672 goto unknown_opcode; 2673 op->reg = rd | ((word & 1) << 5); 2674 op->type = MKOP(LOAD_VSX, 0, 2); 2675 op->element_size = 8; 2676 op->vsx_flags = VSX_CHECK_VEC; 2677 break; 2678 2679 case 844: /* lxvd2x */ 2680 op->reg = rd | ((word & 1) << 5); 2681 op->type = MKOP(LOAD_VSX, 0, 16); 2682 op->element_size = 8; 2683 break; 2684 2685 case 876: /* lxvb16x */ 2686 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2687 goto unknown_opcode; 2688 op->reg = rd | ((word & 1) << 5); 2689 op->type = MKOP(LOAD_VSX, 0, 16); 2690 op->element_size = 1; 2691 op->vsx_flags = VSX_CHECK_VEC; 2692 break; 2693 2694 case 908: /* stxvw4x */ 2695 op->reg = rd | ((word & 1) << 5); 2696 op->type = MKOP(STORE_VSX, 0, 16); 2697 op->element_size = 4; 2698 break; 2699 2700 case 909: /* stxsibx */ 2701 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2702 goto unknown_opcode; 2703 op->reg = rd | ((word & 1) << 5); 2704 op->type = MKOP(STORE_VSX, 0, 1); 2705 op->element_size = 8; 2706 op->vsx_flags = VSX_CHECK_VEC; 2707 break; 2708 2709 case 940: /* stxvh8x */ 2710 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2711 goto unknown_opcode; 2712 op->reg = rd | ((word & 1) << 5); 2713 op->type = MKOP(STORE_VSX, 0, 16); 2714 op->element_size = 2; 2715 op->vsx_flags = VSX_CHECK_VEC; 2716 break; 2717 2718 case 941: /* stxsihx */ 2719 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2720 goto unknown_opcode; 2721 op->reg = rd | ((word & 1) << 5); 2722 op->type = MKOP(STORE_VSX, 0, 2); 2723 op->element_size = 8; 2724 op->vsx_flags = VSX_CHECK_VEC; 2725 break; 2726 2727 case 972: /* stxvd2x */ 2728 op->reg = rd | ((word & 1) << 5); 2729 op->type = MKOP(STORE_VSX, 0, 16); 2730 op->element_size = 8; 2731 break; 2732 2733 case 1004: /* stxvb16x */ 2734 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2735 goto unknown_opcode; 2736 op->reg = rd | ((word & 1) << 5); 2737 op->type = MKOP(STORE_VSX, 0, 16); 2738 op->element_size = 1; 2739 op->vsx_flags = VSX_CHECK_VEC; 2740 break; 2741 2742 #endif /* CONFIG_VSX */ 2743 } 2744 break; 2745 2746 case 32: /* lwz */ 2747 case 33: /* lwzu */ 2748 op->type = MKOP(LOAD, u, 4); 2749 op->ea = dform_ea(word, regs); 2750 break; 2751 2752 case 34: /* lbz */ 2753 case 35: /* lbzu */ 2754 op->type = MKOP(LOAD, u, 1); 2755 op->ea = dform_ea(word, regs); 2756 break; 2757 2758 case 36: /* stw */ 2759 case 37: /* stwu */ 2760 op->type = MKOP(STORE, u, 4); 2761 op->ea = dform_ea(word, regs); 2762 break; 2763 2764 case 38: /* stb */ 2765 case 39: /* stbu */ 2766 op->type = MKOP(STORE, u, 1); 2767 op->ea = dform_ea(word, regs); 2768 break; 2769 2770 case 40: /* lhz */ 2771 case 41: /* lhzu */ 2772 op->type = MKOP(LOAD, u, 2); 2773 op->ea = dform_ea(word, regs); 2774 break; 2775 2776 case 42: /* lha */ 2777 case 43: /* lhau */ 2778 op->type = MKOP(LOAD, SIGNEXT | u, 2); 2779 op->ea = dform_ea(word, regs); 2780 break; 2781 2782 case 44: /* sth */ 2783 case 45: /* sthu */ 2784 op->type = MKOP(STORE, u, 2); 2785 op->ea = dform_ea(word, regs); 2786 break; 2787 2788 case 46: /* lmw */ 2789 if (ra >= rd) 2790 break; /* invalid form, ra in range to load */ 2791 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd)); 2792 op->ea = dform_ea(word, regs); 2793 break; 2794 2795 case 47: /* stmw */ 2796 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd)); 2797 op->ea = dform_ea(word, regs); 2798 break; 2799 2800 #ifdef CONFIG_PPC_FPU 2801 case 48: /* lfs */ 2802 case 49: /* lfsu */ 2803 op->type = MKOP(LOAD_FP, u | FPCONV, 4); 2804 op->ea = dform_ea(word, regs); 2805 break; 2806 2807 case 50: /* lfd */ 2808 case 51: /* lfdu */ 2809 op->type = MKOP(LOAD_FP, u, 8); 2810 op->ea = dform_ea(word, regs); 2811 break; 2812 2813 case 52: /* stfs */ 2814 case 53: /* stfsu */ 2815 op->type = MKOP(STORE_FP, u | FPCONV, 4); 2816 op->ea = dform_ea(word, regs); 2817 break; 2818 2819 case 54: /* stfd */ 2820 case 55: /* stfdu */ 2821 op->type = MKOP(STORE_FP, u, 8); 2822 op->ea = dform_ea(word, regs); 2823 break; 2824 #endif 2825 2826 #ifdef __powerpc64__ 2827 case 56: /* lq */ 2828 if (!((rd & 1) || (rd == ra))) 2829 op->type = MKOP(LOAD, 0, 16); 2830 op->ea = dqform_ea(word, regs); 2831 break; 2832 #endif 2833 2834 #ifdef CONFIG_VSX 2835 case 57: /* lfdp, lxsd, lxssp */ 2836 op->ea = dsform_ea(word, regs); 2837 switch (word & 3) { 2838 case 0: /* lfdp */ 2839 if (rd & 1) 2840 break; /* reg must be even */ 2841 op->type = MKOP(LOAD_FP, 0, 16); 2842 break; 2843 case 2: /* lxsd */ 2844 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2845 goto unknown_opcode; 2846 op->reg = rd + 32; 2847 op->type = MKOP(LOAD_VSX, 0, 8); 2848 op->element_size = 8; 2849 op->vsx_flags = VSX_CHECK_VEC; 2850 break; 2851 case 3: /* lxssp */ 2852 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2853 goto unknown_opcode; 2854 op->reg = rd + 32; 2855 op->type = MKOP(LOAD_VSX, 0, 4); 2856 op->element_size = 8; 2857 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2858 break; 2859 } 2860 break; 2861 #endif /* CONFIG_VSX */ 2862 2863 #ifdef __powerpc64__ 2864 case 58: /* ld[u], lwa */ 2865 op->ea = dsform_ea(word, regs); 2866 switch (word & 3) { 2867 case 0: /* ld */ 2868 op->type = MKOP(LOAD, 0, 8); 2869 break; 2870 case 1: /* ldu */ 2871 op->type = MKOP(LOAD, UPDATE, 8); 2872 break; 2873 case 2: /* lwa */ 2874 op->type = MKOP(LOAD, SIGNEXT, 4); 2875 break; 2876 } 2877 break; 2878 #endif 2879 2880 #ifdef CONFIG_VSX 2881 case 6: 2882 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2883 goto unknown_opcode; 2884 op->ea = dqform_ea(word, regs); 2885 op->reg = VSX_REGISTER_XTP(rd); 2886 op->element_size = 32; 2887 switch (word & 0xf) { 2888 case 0: /* lxvp */ 2889 op->type = MKOP(LOAD_VSX, 0, 32); 2890 break; 2891 case 1: /* stxvp */ 2892 op->type = MKOP(STORE_VSX, 0, 32); 2893 break; 2894 } 2895 break; 2896 2897 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */ 2898 switch (word & 7) { 2899 case 0: /* stfdp with LSB of DS field = 0 */ 2900 case 4: /* stfdp with LSB of DS field = 1 */ 2901 op->ea = dsform_ea(word, regs); 2902 op->type = MKOP(STORE_FP, 0, 16); 2903 break; 2904 2905 case 1: /* lxv */ 2906 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2907 goto unknown_opcode; 2908 op->ea = dqform_ea(word, regs); 2909 if (word & 8) 2910 op->reg = rd + 32; 2911 op->type = MKOP(LOAD_VSX, 0, 16); 2912 op->element_size = 16; 2913 op->vsx_flags = VSX_CHECK_VEC; 2914 break; 2915 2916 case 2: /* stxsd with LSB of DS field = 0 */ 2917 case 6: /* stxsd with LSB of DS field = 1 */ 2918 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2919 goto unknown_opcode; 2920 op->ea = dsform_ea(word, regs); 2921 op->reg = rd + 32; 2922 op->type = MKOP(STORE_VSX, 0, 8); 2923 op->element_size = 8; 2924 op->vsx_flags = VSX_CHECK_VEC; 2925 break; 2926 2927 case 3: /* stxssp with LSB of DS field = 0 */ 2928 case 7: /* stxssp with LSB of DS field = 1 */ 2929 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2930 goto unknown_opcode; 2931 op->ea = dsform_ea(word, regs); 2932 op->reg = rd + 32; 2933 op->type = MKOP(STORE_VSX, 0, 4); 2934 op->element_size = 8; 2935 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2936 break; 2937 2938 case 5: /* stxv */ 2939 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2940 goto unknown_opcode; 2941 op->ea = dqform_ea(word, regs); 2942 if (word & 8) 2943 op->reg = rd + 32; 2944 op->type = MKOP(STORE_VSX, 0, 16); 2945 op->element_size = 16; 2946 op->vsx_flags = VSX_CHECK_VEC; 2947 break; 2948 } 2949 break; 2950 #endif /* CONFIG_VSX */ 2951 2952 #ifdef __powerpc64__ 2953 case 62: /* std[u] */ 2954 op->ea = dsform_ea(word, regs); 2955 switch (word & 3) { 2956 case 0: /* std */ 2957 op->type = MKOP(STORE, 0, 8); 2958 break; 2959 case 1: /* stdu */ 2960 op->type = MKOP(STORE, UPDATE, 8); 2961 break; 2962 case 2: /* stq */ 2963 if (!(rd & 1)) 2964 op->type = MKOP(STORE, 0, 16); 2965 break; 2966 } 2967 break; 2968 case 1: /* Prefixed instructions */ 2969 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2970 goto unknown_opcode; 2971 2972 prefix_r = GET_PREFIX_R(word); 2973 ra = GET_PREFIX_RA(suffix); 2974 op->update_reg = ra; 2975 rd = (suffix >> 21) & 0x1f; 2976 op->reg = rd; 2977 op->val = regs->gpr[rd]; 2978 2979 suffixopcode = get_op(suffix); 2980 prefixtype = (word >> 24) & 0x3; 2981 switch (prefixtype) { 2982 case 0: /* Type 00 Eight-Byte Load/Store */ 2983 if (prefix_r && ra) 2984 break; 2985 op->ea = mlsd_8lsd_ea(word, suffix, regs); 2986 switch (suffixopcode) { 2987 case 41: /* plwa */ 2988 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4); 2989 break; 2990 #ifdef CONFIG_VSX 2991 case 42: /* plxsd */ 2992 op->reg = rd + 32; 2993 op->type = MKOP(LOAD_VSX, PREFIXED, 8); 2994 op->element_size = 8; 2995 op->vsx_flags = VSX_CHECK_VEC; 2996 break; 2997 case 43: /* plxssp */ 2998 op->reg = rd + 32; 2999 op->type = MKOP(LOAD_VSX, PREFIXED, 4); 3000 op->element_size = 8; 3001 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 3002 break; 3003 case 46: /* pstxsd */ 3004 op->reg = rd + 32; 3005 op->type = MKOP(STORE_VSX, PREFIXED, 8); 3006 op->element_size = 8; 3007 op->vsx_flags = VSX_CHECK_VEC; 3008 break; 3009 case 47: /* pstxssp */ 3010 op->reg = rd + 32; 3011 op->type = MKOP(STORE_VSX, PREFIXED, 4); 3012 op->element_size = 8; 3013 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 3014 break; 3015 case 51: /* plxv1 */ 3016 op->reg += 32; 3017 fallthrough; 3018 case 50: /* plxv0 */ 3019 op->type = MKOP(LOAD_VSX, PREFIXED, 16); 3020 op->element_size = 16; 3021 op->vsx_flags = VSX_CHECK_VEC; 3022 break; 3023 case 55: /* pstxv1 */ 3024 op->reg = rd + 32; 3025 fallthrough; 3026 case 54: /* pstxv0 */ 3027 op->type = MKOP(STORE_VSX, PREFIXED, 16); 3028 op->element_size = 16; 3029 op->vsx_flags = VSX_CHECK_VEC; 3030 break; 3031 #endif /* CONFIG_VSX */ 3032 case 56: /* plq */ 3033 op->type = MKOP(LOAD, PREFIXED, 16); 3034 break; 3035 case 57: /* pld */ 3036 op->type = MKOP(LOAD, PREFIXED, 8); 3037 break; 3038 #ifdef CONFIG_VSX 3039 case 58: /* plxvp */ 3040 op->reg = VSX_REGISTER_XTP(rd); 3041 op->type = MKOP(LOAD_VSX, PREFIXED, 32); 3042 op->element_size = 32; 3043 break; 3044 #endif /* CONFIG_VSX */ 3045 case 60: /* pstq */ 3046 op->type = MKOP(STORE, PREFIXED, 16); 3047 break; 3048 case 61: /* pstd */ 3049 op->type = MKOP(STORE, PREFIXED, 8); 3050 break; 3051 #ifdef CONFIG_VSX 3052 case 62: /* pstxvp */ 3053 op->reg = VSX_REGISTER_XTP(rd); 3054 op->type = MKOP(STORE_VSX, PREFIXED, 32); 3055 op->element_size = 32; 3056 break; 3057 #endif /* CONFIG_VSX */ 3058 } 3059 break; 3060 case 1: /* Type 01 Eight-Byte Register-to-Register */ 3061 break; 3062 case 2: /* Type 10 Modified Load/Store */ 3063 if (prefix_r && ra) 3064 break; 3065 op->ea = mlsd_8lsd_ea(word, suffix, regs); 3066 switch (suffixopcode) { 3067 case 32: /* plwz */ 3068 op->type = MKOP(LOAD, PREFIXED, 4); 3069 break; 3070 case 34: /* plbz */ 3071 op->type = MKOP(LOAD, PREFIXED, 1); 3072 break; 3073 case 36: /* pstw */ 3074 op->type = MKOP(STORE, PREFIXED, 4); 3075 break; 3076 case 38: /* pstb */ 3077 op->type = MKOP(STORE, PREFIXED, 1); 3078 break; 3079 case 40: /* plhz */ 3080 op->type = MKOP(LOAD, PREFIXED, 2); 3081 break; 3082 case 42: /* plha */ 3083 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2); 3084 break; 3085 case 44: /* psth */ 3086 op->type = MKOP(STORE, PREFIXED, 2); 3087 break; 3088 case 48: /* plfs */ 3089 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4); 3090 break; 3091 case 50: /* plfd */ 3092 op->type = MKOP(LOAD_FP, PREFIXED, 8); 3093 break; 3094 case 52: /* pstfs */ 3095 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4); 3096 break; 3097 case 54: /* pstfd */ 3098 op->type = MKOP(STORE_FP, PREFIXED, 8); 3099 break; 3100 } 3101 break; 3102 case 3: /* Type 11 Modified Register-to-Register */ 3103 break; 3104 } 3105 #endif /* __powerpc64__ */ 3106 3107 } 3108 3109 if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) { 3110 switch (GETTYPE(op->type)) { 3111 case LOAD: 3112 if (ra == rd) 3113 goto unknown_opcode; 3114 fallthrough; 3115 case STORE: 3116 case LOAD_FP: 3117 case STORE_FP: 3118 if (ra == 0) 3119 goto unknown_opcode; 3120 } 3121 } 3122 3123 #ifdef CONFIG_VSX 3124 if ((GETTYPE(op->type) == LOAD_VSX || 3125 GETTYPE(op->type) == STORE_VSX) && 3126 !cpu_has_feature(CPU_FTR_VSX)) { 3127 return -1; 3128 } 3129 #endif /* CONFIG_VSX */ 3130 3131 return 0; 3132 3133 unknown_opcode: 3134 op->type = UNKNOWN; 3135 return 0; 3136 3137 logical_done: 3138 if (word & 1) 3139 set_cr0(regs, op); 3140 logical_done_nocc: 3141 op->reg = ra; 3142 op->type |= SETREG; 3143 return 1; 3144 3145 arith_done: 3146 if (word & 1) 3147 set_cr0(regs, op); 3148 compute_done: 3149 op->reg = rd; 3150 op->type |= SETREG; 3151 return 1; 3152 3153 priv: 3154 op->type = INTERRUPT | 0x700; 3155 op->val = SRR1_PROGPRIV; 3156 return 0; 3157 3158 trap: 3159 op->type = INTERRUPT | 0x700; 3160 op->val = SRR1_PROGTRAP; 3161 return 0; 3162 } 3163 EXPORT_SYMBOL_GPL(analyse_instr); 3164 NOKPROBE_SYMBOL(analyse_instr); 3165 3166 /* 3167 * For PPC32 we always use stwu with r1 to change the stack pointer. 3168 * So this emulated store may corrupt the exception frame, now we 3169 * have to provide the exception frame trampoline, which is pushed 3170 * below the kprobed function stack. So we only update gpr[1] but 3171 * don't emulate the real store operation. We will do real store 3172 * operation safely in exception return code by checking this flag. 3173 */ 3174 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs) 3175 { 3176 /* 3177 * Check if we already set since that means we'll 3178 * lose the previous value. 3179 */ 3180 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE)); 3181 set_thread_flag(TIF_EMULATE_STACK_STORE); 3182 return 0; 3183 } 3184 3185 static nokprobe_inline void do_signext(unsigned long *valp, int size) 3186 { 3187 switch (size) { 3188 case 2: 3189 *valp = (signed short) *valp; 3190 break; 3191 case 4: 3192 *valp = (signed int) *valp; 3193 break; 3194 } 3195 } 3196 3197 static nokprobe_inline void do_byterev(unsigned long *valp, int size) 3198 { 3199 switch (size) { 3200 case 2: 3201 *valp = byterev_2(*valp); 3202 break; 3203 case 4: 3204 *valp = byterev_4(*valp); 3205 break; 3206 #ifdef __powerpc64__ 3207 case 8: 3208 *valp = byterev_8(*valp); 3209 break; 3210 #endif 3211 } 3212 } 3213 3214 /* 3215 * Emulate an instruction that can be executed just by updating 3216 * fields in *regs. 3217 */ 3218 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) 3219 { 3220 unsigned long next_pc; 3221 3222 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type)); 3223 switch (GETTYPE(op->type)) { 3224 case COMPUTE: 3225 if (op->type & SETREG) 3226 regs->gpr[op->reg] = op->val; 3227 if (op->type & SETCC) 3228 regs->ccr = op->ccval; 3229 if (op->type & SETXER) 3230 regs->xer = op->xerval; 3231 break; 3232 3233 case BRANCH: 3234 if (op->type & SETLK) 3235 regs->link = next_pc; 3236 if (op->type & BRTAKEN) 3237 next_pc = op->val; 3238 if (op->type & DECCTR) 3239 --regs->ctr; 3240 break; 3241 3242 case BARRIER: 3243 switch (op->type & BARRIER_MASK) { 3244 case BARRIER_SYNC: 3245 mb(); 3246 break; 3247 case BARRIER_ISYNC: 3248 isync(); 3249 break; 3250 case BARRIER_EIEIO: 3251 eieio(); 3252 break; 3253 case BARRIER_LWSYNC: 3254 asm volatile("lwsync" : : : "memory"); 3255 break; 3256 case BARRIER_PTESYNC: 3257 asm volatile("ptesync" : : : "memory"); 3258 break; 3259 } 3260 break; 3261 3262 case MFSPR: 3263 switch (op->spr) { 3264 case SPRN_XER: 3265 regs->gpr[op->reg] = regs->xer & 0xffffffffUL; 3266 break; 3267 case SPRN_LR: 3268 regs->gpr[op->reg] = regs->link; 3269 break; 3270 case SPRN_CTR: 3271 regs->gpr[op->reg] = regs->ctr; 3272 break; 3273 default: 3274 WARN_ON_ONCE(1); 3275 } 3276 break; 3277 3278 case MTSPR: 3279 switch (op->spr) { 3280 case SPRN_XER: 3281 regs->xer = op->val & 0xffffffffUL; 3282 break; 3283 case SPRN_LR: 3284 regs->link = op->val; 3285 break; 3286 case SPRN_CTR: 3287 regs->ctr = op->val; 3288 break; 3289 default: 3290 WARN_ON_ONCE(1); 3291 } 3292 break; 3293 3294 default: 3295 WARN_ON_ONCE(1); 3296 } 3297 regs_set_return_ip(regs, next_pc); 3298 } 3299 NOKPROBE_SYMBOL(emulate_update_regs); 3300 3301 /* 3302 * Emulate a previously-analysed load or store instruction. 3303 * Return values are: 3304 * 0 = instruction emulated successfully 3305 * -EFAULT = address out of range or access faulted (regs->dar 3306 * contains the faulting address) 3307 * -EACCES = misaligned access, instruction requires alignment 3308 * -EINVAL = unknown operation in *op 3309 */ 3310 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) 3311 { 3312 int err, size, type; 3313 int i, rd, nb; 3314 unsigned int cr; 3315 unsigned long val; 3316 unsigned long ea; 3317 bool cross_endian; 3318 3319 err = 0; 3320 size = GETSIZE(op->type); 3321 type = GETTYPE(op->type); 3322 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 3323 ea = truncate_if_32bit(regs->msr, op->ea); 3324 3325 switch (type) { 3326 case LARX: 3327 if (ea & (size - 1)) 3328 return -EACCES; /* can't handle misaligned */ 3329 if (!address_ok(regs, ea, size)) 3330 return -EFAULT; 3331 err = 0; 3332 val = 0; 3333 switch (size) { 3334 #ifdef __powerpc64__ 3335 case 1: 3336 __get_user_asmx(val, ea, err, "lbarx"); 3337 break; 3338 case 2: 3339 __get_user_asmx(val, ea, err, "lharx"); 3340 break; 3341 #endif 3342 case 4: 3343 __get_user_asmx(val, ea, err, "lwarx"); 3344 break; 3345 #ifdef __powerpc64__ 3346 case 8: 3347 __get_user_asmx(val, ea, err, "ldarx"); 3348 break; 3349 case 16: 3350 err = do_lqarx(ea, ®s->gpr[op->reg]); 3351 break; 3352 #endif 3353 default: 3354 return -EINVAL; 3355 } 3356 if (err) { 3357 regs->dar = ea; 3358 break; 3359 } 3360 if (size < 16) 3361 regs->gpr[op->reg] = val; 3362 break; 3363 3364 case STCX: 3365 if (ea & (size - 1)) 3366 return -EACCES; /* can't handle misaligned */ 3367 if (!address_ok(regs, ea, size)) 3368 return -EFAULT; 3369 err = 0; 3370 switch (size) { 3371 #ifdef __powerpc64__ 3372 case 1: 3373 __put_user_asmx(op->val, ea, err, "stbcx.", cr); 3374 break; 3375 case 2: 3376 __put_user_asmx(op->val, ea, err, "stbcx.", cr); 3377 break; 3378 #endif 3379 case 4: 3380 __put_user_asmx(op->val, ea, err, "stwcx.", cr); 3381 break; 3382 #ifdef __powerpc64__ 3383 case 8: 3384 __put_user_asmx(op->val, ea, err, "stdcx.", cr); 3385 break; 3386 case 16: 3387 err = do_stqcx(ea, regs->gpr[op->reg], 3388 regs->gpr[op->reg + 1], &cr); 3389 break; 3390 #endif 3391 default: 3392 return -EINVAL; 3393 } 3394 if (!err) 3395 regs->ccr = (regs->ccr & 0x0fffffff) | 3396 (cr & 0xe0000000) | 3397 ((regs->xer >> 3) & 0x10000000); 3398 else 3399 regs->dar = ea; 3400 break; 3401 3402 case LOAD: 3403 #ifdef __powerpc64__ 3404 if (size == 16) { 3405 err = emulate_lq(regs, ea, op->reg, cross_endian); 3406 break; 3407 } 3408 #endif 3409 err = read_mem(®s->gpr[op->reg], ea, size, regs); 3410 if (!err) { 3411 if (op->type & SIGNEXT) 3412 do_signext(®s->gpr[op->reg], size); 3413 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV)) 3414 do_byterev(®s->gpr[op->reg], size); 3415 } 3416 break; 3417 3418 #ifdef CONFIG_PPC_FPU 3419 case LOAD_FP: 3420 /* 3421 * If the instruction is in userspace, we can emulate it even 3422 * if the VMX state is not live, because we have the state 3423 * stored in the thread_struct. If the instruction is in 3424 * the kernel, we must not touch the state in the thread_struct. 3425 */ 3426 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) 3427 return 0; 3428 err = do_fp_load(op, ea, regs, cross_endian); 3429 break; 3430 #endif 3431 #ifdef CONFIG_ALTIVEC 3432 case LOAD_VMX: 3433 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) 3434 return 0; 3435 err = do_vec_load(op->reg, ea, size, regs, cross_endian); 3436 break; 3437 #endif 3438 #ifdef CONFIG_VSX 3439 case LOAD_VSX: { 3440 unsigned long msrbit = MSR_VSX; 3441 3442 /* 3443 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX 3444 * when the target of the instruction is a vector register. 3445 */ 3446 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) 3447 msrbit = MSR_VEC; 3448 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) 3449 return 0; 3450 err = do_vsx_load(op, ea, regs, cross_endian); 3451 break; 3452 } 3453 #endif 3454 case LOAD_MULTI: 3455 if (!address_ok(regs, ea, size)) 3456 return -EFAULT; 3457 rd = op->reg; 3458 for (i = 0; i < size; i += 4) { 3459 unsigned int v32 = 0; 3460 3461 nb = size - i; 3462 if (nb > 4) 3463 nb = 4; 3464 err = copy_mem_in((u8 *) &v32, ea, nb, regs); 3465 if (err) 3466 break; 3467 if (unlikely(cross_endian)) 3468 v32 = byterev_4(v32); 3469 regs->gpr[rd] = v32; 3470 ea += 4; 3471 /* reg number wraps from 31 to 0 for lsw[ix] */ 3472 rd = (rd + 1) & 0x1f; 3473 } 3474 break; 3475 3476 case STORE: 3477 #ifdef __powerpc64__ 3478 if (size == 16) { 3479 err = emulate_stq(regs, ea, op->reg, cross_endian); 3480 break; 3481 } 3482 #endif 3483 if ((op->type & UPDATE) && size == sizeof(long) && 3484 op->reg == 1 && op->update_reg == 1 && 3485 !(regs->msr & MSR_PR) && 3486 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { 3487 err = handle_stack_update(ea, regs); 3488 break; 3489 } 3490 if (unlikely(cross_endian)) 3491 do_byterev(&op->val, size); 3492 err = write_mem(op->val, ea, size, regs); 3493 break; 3494 3495 #ifdef CONFIG_PPC_FPU 3496 case STORE_FP: 3497 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) 3498 return 0; 3499 err = do_fp_store(op, ea, regs, cross_endian); 3500 break; 3501 #endif 3502 #ifdef CONFIG_ALTIVEC 3503 case STORE_VMX: 3504 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) 3505 return 0; 3506 err = do_vec_store(op->reg, ea, size, regs, cross_endian); 3507 break; 3508 #endif 3509 #ifdef CONFIG_VSX 3510 case STORE_VSX: { 3511 unsigned long msrbit = MSR_VSX; 3512 3513 /* 3514 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX 3515 * when the target of the instruction is a vector register. 3516 */ 3517 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) 3518 msrbit = MSR_VEC; 3519 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) 3520 return 0; 3521 err = do_vsx_store(op, ea, regs, cross_endian); 3522 break; 3523 } 3524 #endif 3525 case STORE_MULTI: 3526 if (!address_ok(regs, ea, size)) 3527 return -EFAULT; 3528 rd = op->reg; 3529 for (i = 0; i < size; i += 4) { 3530 unsigned int v32 = regs->gpr[rd]; 3531 3532 nb = size - i; 3533 if (nb > 4) 3534 nb = 4; 3535 if (unlikely(cross_endian)) 3536 v32 = byterev_4(v32); 3537 err = copy_mem_out((u8 *) &v32, ea, nb, regs); 3538 if (err) 3539 break; 3540 ea += 4; 3541 /* reg number wraps from 31 to 0 for stsw[ix] */ 3542 rd = (rd + 1) & 0x1f; 3543 } 3544 break; 3545 3546 default: 3547 return -EINVAL; 3548 } 3549 3550 if (err) 3551 return err; 3552 3553 if (op->type & UPDATE) 3554 regs->gpr[op->update_reg] = op->ea; 3555 3556 return 0; 3557 } 3558 NOKPROBE_SYMBOL(emulate_loadstore); 3559 3560 /* 3561 * Emulate instructions that cause a transfer of control, 3562 * loads and stores, and a few other instructions. 3563 * Returns 1 if the step was emulated, 0 if not, 3564 * or -1 if the instruction is one that should not be stepped, 3565 * such as an rfid, or a mtmsrd that would clear MSR_RI. 3566 */ 3567 int emulate_step(struct pt_regs *regs, ppc_inst_t instr) 3568 { 3569 struct instruction_op op; 3570 int r, err, type; 3571 unsigned long val; 3572 unsigned long ea; 3573 3574 r = analyse_instr(&op, regs, instr); 3575 if (r < 0) 3576 return r; 3577 if (r > 0) { 3578 emulate_update_regs(regs, &op); 3579 return 1; 3580 } 3581 3582 err = 0; 3583 type = GETTYPE(op.type); 3584 3585 if (OP_IS_LOAD_STORE(type)) { 3586 err = emulate_loadstore(regs, &op); 3587 if (err) 3588 return 0; 3589 goto instr_done; 3590 } 3591 3592 switch (type) { 3593 case CACHEOP: 3594 ea = truncate_if_32bit(regs->msr, op.ea); 3595 if (!address_ok(regs, ea, 8)) 3596 return 0; 3597 switch (op.type & CACHEOP_MASK) { 3598 case DCBST: 3599 __cacheop_user_asmx(ea, err, "dcbst"); 3600 break; 3601 case DCBF: 3602 __cacheop_user_asmx(ea, err, "dcbf"); 3603 break; 3604 case DCBTST: 3605 if (op.reg == 0) 3606 prefetchw((void *) ea); 3607 break; 3608 case DCBT: 3609 if (op.reg == 0) 3610 prefetch((void *) ea); 3611 break; 3612 case ICBI: 3613 __cacheop_user_asmx(ea, err, "icbi"); 3614 break; 3615 case DCBZ: 3616 err = emulate_dcbz(ea, regs); 3617 break; 3618 } 3619 if (err) { 3620 regs->dar = ea; 3621 return 0; 3622 } 3623 goto instr_done; 3624 3625 case MFMSR: 3626 regs->gpr[op.reg] = regs->msr & MSR_MASK; 3627 goto instr_done; 3628 3629 case MTMSR: 3630 val = regs->gpr[op.reg]; 3631 if ((val & MSR_RI) == 0) 3632 /* can't step mtmsr[d] that would clear MSR_RI */ 3633 return -1; 3634 /* here op.val is the mask of bits to change */ 3635 regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val)); 3636 goto instr_done; 3637 3638 #ifdef CONFIG_PPC64 3639 case SYSCALL: /* sc */ 3640 /* 3641 * N.B. this uses knowledge about how the syscall 3642 * entry code works. If that is changed, this will 3643 * need to be changed also. 3644 */ 3645 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) && 3646 cpu_has_feature(CPU_FTR_REAL_LE) && 3647 regs->gpr[0] == 0x1ebe) { 3648 regs_set_return_msr(regs, regs->msr ^ MSR_LE); 3649 goto instr_done; 3650 } 3651 regs->gpr[9] = regs->gpr[13]; 3652 regs->gpr[10] = MSR_KERNEL; 3653 regs->gpr[11] = regs->nip + 4; 3654 regs->gpr[12] = regs->msr & MSR_MASK; 3655 regs->gpr[13] = (unsigned long) get_paca(); 3656 regs_set_return_ip(regs, (unsigned long) &system_call_common); 3657 regs_set_return_msr(regs, MSR_KERNEL); 3658 return 1; 3659 3660 #ifdef CONFIG_PPC_BOOK3S_64 3661 case SYSCALL_VECTORED_0: /* scv 0 */ 3662 regs->gpr[9] = regs->gpr[13]; 3663 regs->gpr[10] = MSR_KERNEL; 3664 regs->gpr[11] = regs->nip + 4; 3665 regs->gpr[12] = regs->msr & MSR_MASK; 3666 regs->gpr[13] = (unsigned long) get_paca(); 3667 regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate); 3668 regs_set_return_msr(regs, MSR_KERNEL); 3669 return 1; 3670 #endif 3671 3672 case RFI: 3673 return -1; 3674 #endif 3675 } 3676 return 0; 3677 3678 instr_done: 3679 regs_set_return_ip(regs, 3680 truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type))); 3681 return 1; 3682 } 3683 NOKPROBE_SYMBOL(emulate_step); 3684