// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
#include <asm/disassemble.h>

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
#define XER_OV32	0x00080000U
#define XER_CA32	0x00040000U

#ifdef CONFIG_VSX
#define VSX_REGISTER_XTP(rd)	((((rd) & 1) << 5) | ((rd) & 0xfe))
#endif

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern void get_fpr(int rn, double *p);
extern void put_fpr(int rn, const double *p);
extern void get_vr(int rn, __vector128 *p);
extern void put_vr(int rn, __vector128 *p);
extern void load_vsrn(int vsr, const void *p);
extern void store_vsrn(int vsr, void *p);
extern void conv_sp_to_dp(const float *sp, double *dp);
extern void conv_dp_to_sp(const double *dp, float *sp);
#endif

#ifdef __powerpc64__
/*
 * Functions in quad.S
 */
extern int do_lq(unsigned long ea, unsigned long *regs);
extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
extern int do_lqarx(unsigned long ea, unsigned long *regs);
extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
		    unsigned int *crp);
#endif

#ifdef __LITTLE_ENDIAN__
#define IS_LE	1
#define IS_BE	0
#else
#define IS_LE	0
#define IS_BE	1
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
						       unsigned long val)
{
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
	return val;
}
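/*
 * Example: in 32-bit mode (MSR_64BIT clear) an effective address
 * computed as 0x1_0000_0004 truncates to 0x0000_0004, matching how
 * the hardware ignores the upper 32 bits of an EA when MSR[SF] = 0.
 */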
/*
 * Determine whether a conditional branch instruction would branch.
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (access_ok((void __user *)ea, nb))
		return 1;
	if (access_ok((void __user *)ea, 1))
		/* Access overlaps the end of the user region */
		regs->dar = TASK_SIZE_MAX - 1;
	else
		regs->dar = ea;
	return 0;
}

/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a DQ-form instruction
 */
static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~0xf);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
#endif /* __powerpc64 */

/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a MLS:D-form / 8LS:D-form
 * prefixed instruction
 */
static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
						  unsigned int suffix,
						  const struct pt_regs *regs)
{
	int ra, prefix_r;
	unsigned int dd;
	unsigned long ea, d0, d1, d;

	prefix_r = GET_PREFIX_R(instr);
	ra = GET_PREFIX_RA(suffix);

	d0 = instr & 0x3ffff;
	d1 = suffix & 0xffff;
	d = (d0 << 16) | d1;

	/*
	 * sign extend a 34 bit number
	 */
	dd = (unsigned int)(d >> 2);
	ea = (signed int)dd;
	ea = (ea << 2) | (d & 0x3);

	if (!prefix_r && ra)
		ea += regs->gpr[ra];
	else if (!prefix_r && !ra)
		; /* Leave ea as is */
	else if (prefix_r)
		ea += regs->nip;

	/*
	 * (prefix_r && ra) is an invalid form. Should already be
	 * checked for by caller!
	 */

	return ea;
}
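/*
 * Worked example of the 34-bit sign extension above: for the most
 * negative displacement d = 0x2_0000_0000, d >> 2 = 0x8000_0000 is
 * sign-extended to 0xffff_ffff_8000_0000, and (ea << 2) | (d & 3)
 * yields 0xffff_fffe_0000_0000, i.e. -2^33.
 */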
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}

static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;

		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
	case 32: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;

		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[3]);
		up[3] = tmp;
		tmp = byterev_8(up[2]);
		up[2] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}

#endif
	default:
		WARN_ON_ONCE(1);
	}
}

static __always_inline int
__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	unsigned long x = 0;

	switch (nb) {
	case 1:
		unsafe_get_user(x, (unsigned char __user *)ea, Efault);
		break;
	case 2:
		unsafe_get_user(x, (unsigned short __user *)ea, Efault);
		break;
	case 4:
		unsafe_get_user(x, (unsigned int __user *)ea, Efault);
		break;
#ifdef __powerpc64__
	case 8:
		unsafe_get_user(x, (unsigned long __user *)ea, Efault);
		break;
#endif
	}
	*dest = x;
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int
read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	void __user *uea = (void __user *)ea;

	if (is_kernel_addr(ea))
		return __read_mem_aligned(dest, ea, nb, regs);

	scoped_user_read_access_size(uea, nb, efault)
		return __read_mem_aligned(dest, (unsigned long)uea, nb, regs);

efault:
	regs->dar = ea;
	return -EFAULT;
}
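/*
 * Example of how max_align() drives the copy loops below: for
 * ea = 0x1003 and nb = 8 the accesses are 1 byte at 0x1003,
 * 4 bytes at 0x1004, then 1-byte accesses for the remainder,
 * so every access is naturally aligned.
 */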
/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			unsafe_get_user(*dest, (u8 __user *)ea, Efault);
			break;
		case 2:
			unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
			break;
		case 4:
			unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
			break;
#ifdef __powerpc64__
		case 8:
			unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
			break;
#endif
		}
		dest += c;
		ea += c;
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	void __user *uea = (void __user *)ea;

	if (is_kernel_addr(ea))
		return __copy_mem_in(dest, ea, nb, regs);

	scoped_user_read_access_size(uea, nb, efault)
		return __copy_mem_in(dest, (unsigned long)uea, nb, regs);

efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb,
					      struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;
	int err;

	u.ul = 0;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	err = copy_mem_in(&u.b[i], ea, nb, regs);
	if (!err)
		*dest = u.ul;
	return err;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 * If nb < sizeof(long), the result is right-justified on BE systems.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
		    struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb, regs);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);

static __always_inline int
__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
	switch (nb) {
	case 1:
		unsafe_put_user(val, (unsigned char __user *)ea, Efault);
		break;
	case 2:
		unsafe_put_user(val, (unsigned short __user *)ea, Efault);
		break;
	case 4:
		unsafe_put_user(val, (unsigned int __user *)ea, Efault);
		break;
#ifdef __powerpc64__
	case 8:
		unsafe_put_user(val, (unsigned long __user *)ea, Efault);
		break;
#endif
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int
write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
	void __user *uea = (void __user *)ea;

	if (is_kernel_addr(ea))
		return __write_mem_aligned(val, ea, nb, regs);

	scoped_user_write_access_size(uea, nb, efault)
		return __write_mem_aligned(val, (unsigned long)uea, nb, regs);

efault:
	regs->dar = ea;
	return -EFAULT;
}
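/*
 * The union trick in read_mem_unaligned() above (mirrored in
 * write_mem_unaligned() below) keeps partial values right-justified:
 * on a big-endian kernel a 2-byte load is copied into b[6..7] of the
 * zeroed 8-byte buffer, so u.ul holds the value in its low-order
 * bytes, as read_mem()'s comment promises.
 */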
/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			unsafe_put_user(*dest, (u8 __user *)ea, Efault);
			break;
		case 2:
			unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
			break;
		case 4:
			unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
			break;
#ifdef __powerpc64__
		case 8:
			unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
			break;
#endif
		}
		dest += c;
		ea += c;
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	void __user *uea = (void __user *)ea;

	if (is_kernel_addr(ea))
		return __copy_mem_out(dest, ea, nb, regs);

	scoped_user_write_access_size(uea, nb, efault)
		return __copy_mem_out(dest, (unsigned long)uea, nb, regs);

efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb,
					       struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;

	u.ul = val;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	return copy_mem_out(&u.b[i], ea, nb, regs);
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
		     struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb, regs);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);
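/*
 * Note on the cross_endian handling in the FP helpers below: a
 * 16-byte lfdp/stfdp pair is byte-reversed as two independent
 * 8-byte doublewords, since each register of the pair keeps its
 * own byte order.
 */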
#ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (nb > sizeof(u))
		return -EINVAL;
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	preempt_disable();
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);

static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (nb > sizeof(u))
		return -EINVAL;
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	preempt_disable();
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	if (size > sizeof(u))
		return -EINVAL;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
	preempt_disable();
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}
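/*
 * The element loads above copy into the union at offset (ea & 0xf),
 * i.e. at the byte position the element occupies within the vector
 * register, so with u zero-initialised the remaining bytes of the
 * register image simply read as zero.  do_vec_store() below mirrors
 * the same offset arithmetic.
 */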
static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	if (size > sizeof(u))
		return -EINVAL;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);

	preempt_disable();
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}

static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
#endif /* __powerpc64 */
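/*
 * In the unaligned lq/stq paths above, reg + IS_LE names the GPR
 * that receives (or supplies) the doubleword at the lower address
 * and reg + IS_BE the one at ea + 8, so the register pair ends up
 * holding the same image an aligned do_lq()/do_stq() would produce.
 */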
#ifdef CONFIG_VSX
static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
					     const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 32:
		/* [p]lxvp[x] */
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, size);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				unsigned long v = *(unsigned long *)(mem + 8);

				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			u32 val = reg->w[IS_LE ? 3 : 0];

			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}

static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
					      void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 32:
		/* [p]stxvp[x] */
		if (size == 0)
			break;
		if (rev) {
			/* reverse 32 bytes */
			union vsx_reg buf32[2];

			buf32[0].d[0] = byterev_8(reg[1].d[1]);
			buf32[0].d[1] = byterev_8(reg[1].d[0]);
			buf32[1].d[0] = byterev_8(reg[0].d[1]);
			buf32[1].d[1] = byterev_8(reg[0].d[0]);
			memcpy(mem, buf32, size);
		} else {
			memcpy(mem, reg, size);
		}
		break;
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
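/*
 * op->element_size keys the layout used above: 32 is a paired
 * [p]lxvp/[p]stxvp access, 16 moves the whole vector, 8 covers the
 * doubleword and scalar forms, and 4/2/1 cover the word, halfword
 * and byte array forms (lxvw4x, lxvh8x, lxvb16x and their stores).
 */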
static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	emulate_vsx_load(op, buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
			}
		}
	}
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
			}
		}
	}
	preempt_enable();
	emulate_vsx_store(op, buf, mem, cross_endian);
	return copy_mem_out(mem, ea, size, regs);
}
#endif /* CONFIG_VSX */

static __always_inline int __emulate_dcbz(unsigned long ea)
{
	unsigned long i;
	unsigned long size = l1_dcache_bytes();

	for (i = 0; i < size; i += sizeof(long))
		unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);

	return 0;

Efault:
	return -EFAULT;
}

int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	void __user *uea;
	int err;
	unsigned long size = l1_dcache_bytes();

	ea = truncate_if_32bit(regs->msr, ea);
	ea &= ~(size - 1);
	/* compute uea only after ea has been truncated and aligned */
	uea = (void __user *)ea;
	if (!address_ok(regs, ea, size))
		return -EFAULT;

	if (is_kernel_addr(ea))
		err = __emulate_dcbz(ea);
	else
		scoped_user_write_access_size(uea, size, efault)
			err = __emulate_dcbz((unsigned long)uea);

	if (err)
		regs->dar = ea;

	return err;

efault:
	regs->dar = ea;
	return -EFAULT;
}
NOKPROBE_SYMBOL(emulate_dcbz);

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine power8\n"			\
		"1:	" op " %2,0,%3\n"		\
		".machine pop\n"			\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine power8\n"			\
		"1:	"op" %1,0,%2\n"			\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))
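/*
 * In the inline asm above, EX_TABLE(1b, 3b) registers a fixup entry:
 * if the access at label 1 faults, execution resumes at label 3 in
 * the .fixup section, which loads -EFAULT into err and branches back
 * to label 2.
 */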
#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%2\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op)
{
	long val = op->val;

	op->type |= SETCC;
	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
	if (val < 0)
		op->ccval |= 0x80000000;
	else if (val > 0)
		op->ccval |= 0x40000000;
	else
		op->ccval |= 0x20000000;
}

static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (val)
			op->xerval |= XER_CA32;
		else
			op->xerval &= ~XER_CA32;
	}
}

static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
					   struct instruction_op *op, int rd,
					   unsigned long val1, unsigned long val2,
					   unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE | SETREG | SETXER;
	op->reg = rd;
	op->val = val;
	val = truncate_if_32bit(regs->msr, val);
	val1 = truncate_if_32bit(regs->msr, val1);
	op->xerval = regs->xer;
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;

	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
		 (carry_in && (unsigned int)val == (unsigned int)val1));
}

static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
					  struct instruction_op *op,
					  long v1, long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE | SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
					    struct instruction_op *op,
					    unsigned long v1,
					    unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE | SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v1, unsigned long v2)
{
	unsigned long long out_val, mask;
	int i;

	out_val = 0;
	for (i = 0; i < 8; i++) {
		mask = 0xffUL << (i * 8);
		if ((v1 & mask) == (v2 & mask))
			out_val |= mask;
	}
	op->val = out_val;
}
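/*
 * cmpb example: v1 = 0x1122334455667788 and v2 = 0x1122000055660088
 * agree in bytes 7, 6, 3, 2 and 0 (counting from the least
 * significant byte), so op->val becomes 0xffff0000ffff00ff.
 */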
/*
 * The size parameter is used to adjust the equivalent popcnt instruction.
 * popcntb = 8, popcntw = 32, popcntd = 64
 */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
{
	unsigned long long out = v1;

	out -= (out >> 1) & 0x5555555555555555ULL;
	out = (0x3333333333333333ULL & out) +
	      (0x3333333333333333ULL & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;

	if (size == 8) {	/* popcntb */
		op->val = out;
		return;
	}
	out += out >> 8;
	out += out >> 16;
	if (size == 32) {	/* popcntw */
		op->val = out & 0x0000003f0000003fULL;
		return;
	}

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
}

#ifdef CONFIG_PPC64
static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, unsigned long v2)
{
	unsigned char perm, idx;
	unsigned int i;

	perm = 0;
	for (i = 0; i < 8; i++) {
		idx = (v1 >> (i * 8)) & 0xff;
		if (idx < 64)
			if (v2 & PPC_BIT(idx))
				perm |= 1 << i;
	}
	op->val = perm;
}
#endif /* CONFIG_PPC64 */
/*
 * The size parameter adjusts the equivalent prty instruction.
 * prtyw = 32, prtyd = 64
 */
static nokprobe_inline void do_prty(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v, int size)
{
	unsigned long long res = v ^ (v >> 8);

	res ^= res >> 16;
	if (size == 32) {	/* prtyw */
		op->val = res & 0x0000000100000001ULL;
		return;
	}

	res ^= res >> 32;
	op->val = res & 1;	/* prtyd */
}

static nokprobe_inline int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)		((0xffffffffUL >> (mb)) + \
				 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)		(~0UL >> (mb))
#define MASK64_R(me)		((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)		(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)		(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
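/*
 * MASK32/MASK64 build a run of ones from bit mb through bit me in
 * IBM (big-endian) bit numbering.  For example MASK64_L(56) is 0xff
 * (the low byte), and MASK32(24, 31) is likewise 0xff; when me < mb
 * the mask wraps around, as the rotate-and-mask forms allow.
 */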
/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 * Integer arithmetic and logical instructions, branches, and barrier
 * instructions can be emulated just using the information in *op.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
		  ppc_inst_t instr)
{
#ifdef CONFIG_PPC64
	unsigned int suffixopcode, prefixtype, prefix_r;
#endif
	unsigned int opcode, ra, rb, rc, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	unsigned int word, suffix;
	long ival;

	word = ppc_inst_val(instr);
	suffix = ppc_inst_suffix(instr);

	op->type = COMPUTE;

	opcode = ppc_inst_primary_opcode(instr);
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(word & 0xfffc);
		if ((word & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (word & 1)
			op->type |= SETLK;
		if (branch_taken(word, regs, op))
			op->type |= BRTAKEN;
		return 1;
	case 17:	/* sc */
		if ((word & 0xfe2) == 2)
			op->type = SYSCALL;
		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
			 (word & 0xfe3) == 1) {	/* scv */
			op->type = SYSCALL_VECTORED_0;
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
		} else
			op->type = UNKNOWN;
		return 0;
	case 18:	/* b */
		op->type = BRANCH | BRTAKEN;
		imm = word & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((word & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (word & 1)
			op->type |= SETLK;
		return 1;
	case 19:
		switch ((word >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			op->type = COMPUTE + SETCC;
			rd = 7 - ((word >> 23) & 0x7);
			ra = 7 - ((word >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			return 1;
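		/*
		 * For mcrf above, 7 - BF converts the big-endian CR field
		 * number into a little-endian nibble index, and the "* 4"
		 * turns it into a bit shift: e.g. mcrf 0,1 extracts the
		 * source field at shift (7-1)*4 = 24 and deposits it at
		 * shift (7-0)*4 = 28.
		 */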
		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (word & 0x400) ? regs->ctr : regs->link;
			op->val = truncate_if_32bit(regs->msr, imm);
			if (word & 1)
				op->type |= SETLK;
			if (branch_taken(word, regs, op))
				op->type |= BRTAKEN;
			return 1;

		case 18:	/* rfid, scary */
			if (user_mode(regs))
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER | BARRIER_ISYNC;
			return 1;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			op->type = COMPUTE + SETCC;
			ra = (word >> 16) & 0x1f;
			rb = (word >> 11) & 0x1f;
			rd = (word >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (word >> (6 + ra * 2 + rb)) & 1;
			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			return 1;
		}
		break;
	case 31:
		switch ((word >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
			switch ((word >> 21) & 3) {
			case 1:		/* lwsync */
				op->type = BARRIER + BARRIER_LWSYNC;
				break;
			case 2:		/* ptesync */
				op->type = BARRIER + BARRIER_PTESYNC;
				break;
			}
#endif
			return 1;

		case 854:	/* eieio */
			op->type = BARRIER + BARRIER_EIEIO;
			return 1;
		}
		break;
	}

	rd = (word >> 21) & 0x1f;
	ra = (word >> 16) & 0x1f;
	rb = (word >> 11) & 0x1f;
	rc = (word >> 6) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 1:
		if (!cpu_has_feature(CPU_FTR_ARCH_31))
			goto unknown_opcode;

		prefix_r = GET_PREFIX_R(word);
		ra = GET_PREFIX_RA(suffix);
		rd = (suffix >> 21) & 0x1f;
		op->reg = rd;
		op->val = regs->gpr[rd];
		suffixopcode = get_op(suffix);
		prefixtype = (word >> 24) & 0x3;
		switch (prefixtype) {
		case 2:
			if (prefix_r && ra)
				return 0;
			switch (suffixopcode) {
			case 14:	/* paddi */
				op->type = COMPUTE | PREFIXED;
				op->val = mlsd_8lsd_ea(word, suffix, regs);
				goto compute_done;
			}
		}
		break;
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) word))
			goto trap;
		return 1;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
			goto trap;
		return 1;

#ifdef __powerpc64__
	case 4:
		/*
		 * There are very many instructions with this primary opcode
		 * introduced in the ISA as early as v2.03. However, the ones
		 * we currently emulate were all introduced with ISA 3.0
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			goto unknown_opcode;

		switch (word & 0x3f) {
		case 48:	/* maddhd */
			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;

		case 49:	/* maddhdu */
			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;

		case 51:	/* maddld */
			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;
		}
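		/*
		 * maddhd/maddhdu/maddld are emulated by executing the
		 * instruction itself via inline asm, which is safe here
		 * because the CPU_FTR_ARCH_300 check above guarantees the
		 * CPU implements it.
		 */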
		/*
		 * There are other instructions from ISA 3.0 with the same
		 * primary opcode which do not have emulation support yet.
		 */
		goto unknown_opcode;
#endif

	case 7:		/* mulli */
		op->val = regs->gpr[ra] * (short) word;
		goto compute_done;

	case 8:		/* subfic */
		imm = (short) word;
		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
		return 1;

	case 10:	/* cmpli */
		imm = (unsigned short) word;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
		return 1;

	case 11:	/* cmpi */
		imm = (short) word;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, op, val, imm, rd >> 2);
		return 1;

	case 12:	/* addic */
		imm = (short) word;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		return 1;

	case 13:	/* addic. */
		imm = (short) word;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, op);
		return 1;

	case 14:	/* addi */
		imm = (short) word;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 15:	/* addis */
		imm = ((short) word) << 16;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 19:
		if (((word >> 1) & 0x1f) == 2) {
			/* addpcis */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
			imm |= (word >> 15) & 0x3e;	/* d1 field */
			op->val = regs->nip + (imm << 16) + 4;
			goto compute_done;
		}
		op->type = UNKNOWN;
		return 0;

	case 20:	/* rlwimi */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		op->val = regs->gpr[rd] | (unsigned short) word;
		goto logical_done_nocc;

	case 25:	/* oris */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] | (imm << 16);
		goto logical_done_nocc;

	case 26:	/* xori */
		op->val = regs->gpr[rd] ^ (unsigned short) word;
		goto logical_done_nocc;

	case 27:	/* xoris */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] ^ (imm << 16);
		goto logical_done_nocc;

	case 28:	/* andi. */
		op->val = regs->gpr[rd] & (unsigned short) word;
		set_cr0(regs, op);
		goto logical_done_nocc;
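	/*
	 * Unlike ori/xori above, andi. and andis. always set CR0 (the
	 * record bit is implicit in the opcode), hence the explicit
	 * set_cr0() calls here.
	 */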
	case 29:	/* andis. */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, op);
		goto logical_done_nocc;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((word >> 6) & 0x1f) | (word & 0x20);
		val = regs->gpr[rd];
		if ((word & 0x10) == 0) {
			sh = rb | ((word & 2) << 4);
			val = ROTATE(val, sh);
			switch ((word >> 2) & 3) {
			case 0:		/* rldicl */
				val &= MASK64_L(mb);
				break;
			case 1:		/* rldicr */
				val &= MASK64_R(mb);
				break;
			case 2:		/* rldic */
				val &= MASK64(mb, 63 - sh);
				break;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				val = (regs->gpr[ra] & ~imm) |
					(val & imm);
			}
			op->val = val;
			goto logical_done;
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((word >> 1) & 7) {
			case 0:		/* rldcl */
				op->val = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				op->val = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		op->type = UNKNOWN;	/* illegal instruction */
		return 0;

	case 31:
		/* isel occupies 32 minor opcodes */
		if (((word >> 1) & 0x1f) == 15) {
			mb = (word >> 6) & 0x1f;	/* bc field */
			val = (regs->ccr >> (31 - mb)) & 1;
			val2 = (ra) ? regs->gpr[ra] : 0;

			op->val = (val) ? val2 : regs->gpr[rb];
			goto compute_done;
		}

		switch ((word >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			return 1;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			return 1;
#endif
		case 83:	/* mfmsr */
			if (user_mode(regs))
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (user_mode(regs))
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (user_mode(regs))
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (word & 0x10000) ? 0x8002 : 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			imm = 0xffffffffUL;
			if ((word >> 20) & 1) {
				imm = 0xf0000000UL;
				for (sh = 0; sh < 8; ++sh) {
					if (word & (0x80000 >> sh))
						break;
					imm >>= 4;
				}
			}
			op->val = regs->ccr & imm;
			goto compute_done;
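		/*
		 * The mfcr loop above handles the mfocrf form: it scans
		 * the FXM field for the selected CR field and narrows imm
		 * from all ones down to the 4-bit mask for that field,
		 * e.g. FXM = 0x80 keeps imm = 0xf0000000 (CR field 0).
		 */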
1776 */ 1777 ra = ra & ~0x3; 1778 /* 'val' stores bits of the CR field (bfa) */ 1779 val = regs->ccr >> (CR0_SHIFT - ra); 1780 /* checks if the LT bit of CR field (bfa) is set */ 1781 if (val & 8) 1782 op->val = -1; 1783 /* checks if the GT bit of CR field (bfa) is set */ 1784 else if (val & 4) 1785 op->val = 1; 1786 else 1787 op->val = 0; 1788 goto compute_done; 1789 1790 case 144: /* mtcrf */ 1791 op->type = COMPUTE + SETCC; 1792 imm = 0xf0000000UL; 1793 val = regs->gpr[rd]; 1794 op->ccval = regs->ccr; 1795 for (sh = 0; sh < 8; ++sh) { 1796 if (word & (0x80000 >> sh)) 1797 op->ccval = (op->ccval & ~imm) | 1798 (val & imm); 1799 imm >>= 4; 1800 } 1801 return 1; 1802 1803 case 339: /* mfspr */ 1804 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); 1805 op->type = MFSPR; 1806 op->reg = rd; 1807 op->spr = spr; 1808 if (spr == SPRN_XER || spr == SPRN_LR || 1809 spr == SPRN_CTR) 1810 return 1; 1811 return 0; 1812 1813 case 467: /* mtspr */ 1814 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); 1815 op->type = MTSPR; 1816 op->val = regs->gpr[rd]; 1817 op->spr = spr; 1818 if (spr == SPRN_XER || spr == SPRN_LR || 1819 spr == SPRN_CTR) 1820 return 1; 1821 return 0; 1822 1823 /* 1824 * Compare instructions 1825 */ 1826 case 0: /* cmp */ 1827 val = regs->gpr[ra]; 1828 val2 = regs->gpr[rb]; 1829 #ifdef __powerpc64__ 1830 if ((rd & 1) == 0) { 1831 /* word (32-bit) compare */ 1832 val = (int) val; 1833 val2 = (int) val2; 1834 } 1835 #endif 1836 do_cmp_signed(regs, op, val, val2, rd >> 2); 1837 return 1; 1838 1839 case 32: /* cmpl */ 1840 val = regs->gpr[ra]; 1841 val2 = regs->gpr[rb]; 1842 #ifdef __powerpc64__ 1843 if ((rd & 1) == 0) { 1844 /* word (32-bit) compare */ 1845 val = (unsigned int) val; 1846 val2 = (unsigned int) val2; 1847 } 1848 #endif 1849 do_cmp_unsigned(regs, op, val, val2, rd >> 2); 1850 return 1; 1851 1852 case 508: /* cmpb */ 1853 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]); 1854 goto logical_done_nocc; 1855 1856 /* 1857 * Arithmetic instructions 1858 */ 1859 case 8: /* subfc */ 1860 add_with_carry(regs, op, rd, ~regs->gpr[ra], 1861 regs->gpr[rb], 1); 1862 goto arith_done; 1863 #ifdef __powerpc64__ 1864 case 9: /* mulhdu */ 1865 asm("mulhdu %0,%1,%2" : "=r" (op->val) : 1866 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1867 goto arith_done; 1868 #endif 1869 case 10: /* addc */ 1870 add_with_carry(regs, op, rd, regs->gpr[ra], 1871 regs->gpr[rb], 0); 1872 goto arith_done; 1873 1874 case 11: /* mulhwu */ 1875 asm("mulhwu %0,%1,%2" : "=r" (op->val) : 1876 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1877 goto arith_done; 1878 1879 case 40: /* subf */ 1880 op->val = regs->gpr[rb] - regs->gpr[ra]; 1881 goto arith_done; 1882 #ifdef __powerpc64__ 1883 case 73: /* mulhd */ 1884 asm("mulhd %0,%1,%2" : "=r" (op->val) : 1885 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1886 goto arith_done; 1887 #endif 1888 case 75: /* mulhw */ 1889 asm("mulhw %0,%1,%2" : "=r" (op->val) : 1890 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1891 goto arith_done; 1892 1893 case 104: /* neg */ 1894 op->val = -regs->gpr[ra]; 1895 goto arith_done; 1896 1897 case 136: /* subfe */ 1898 add_with_carry(regs, op, rd, ~regs->gpr[ra], 1899 regs->gpr[rb], regs->xer & XER_CA); 1900 goto arith_done; 1901 1902 case 138: /* adde */ 1903 add_with_carry(regs, op, rd, regs->gpr[ra], 1904 regs->gpr[rb], regs->xer & XER_CA); 1905 goto arith_done; 1906 1907 case 200: /* subfze */ 1908 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L, 1909 regs->xer & XER_CA); 1910 goto arith_done; 1911 1912 case 202: /* addze */ 1913 
		case 202:	/* addze */
			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			op->val = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			op->val = (long)(int) regs->gpr[ra] *
				(int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 265:	/* modud */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = regs->gpr[ra] % regs->gpr[rb];
			goto compute_done;
#endif
		case 266:	/* add */
			op->val = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;

		case 267:	/* moduw */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (unsigned int) regs->gpr[ra] %
				(unsigned int) regs->gpr[rb];
			goto compute_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			op->val = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			op->val = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			op->val = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			op->val = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 425:	/* divde[.] */
			asm volatile(PPC_DIVDE(%0, %1, %2) :
				"=r" (op->val) : "r" (regs->gpr[ra]),
				"r" (regs->gpr[rb]));
			goto arith_done;
		case 393:	/* divdeu[.] */
			asm volatile(PPC_DIVDEU(%0, %1, %2) :
				"=r" (op->val) : "r" (regs->gpr[ra]),
				"r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 755:	/* darn */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			switch (ra & 0x3) {
			case 0:
				/* 32-bit conditioned */
				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
				goto compute_done;

			case 1:
				/* 64-bit conditioned */
				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
				goto compute_done;

			case 2:
				/* 64-bit raw */
				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
				goto compute_done;
			}

			goto unknown_opcode;
#ifdef __powerpc64__
		case 777:	/* modsd */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (long int) regs->gpr[ra] %
				(long int) regs->gpr[rb];
			goto compute_done;
#endif
		case 779:	/* modsw */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (int) regs->gpr[ra] %
				(int) regs->gpr[rb];
			goto compute_done;

/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			val = (unsigned int) regs->gpr[rd];
			op->val = (val ? __builtin_clz(val) : 32);
			goto logical_done;
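		/*
		 * __builtin_clz() is undefined for a zero argument, so
		 * the val ? ... : 32 (and : 64 for cntlzd below) supplies
		 * the architected result for a zero source register.
		 */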
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			val = regs->gpr[rd];
			op->val = (val ? __builtin_clzl(val) : 64);
			goto logical_done;
#endif
		case 28:	/* and */
			op->val = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			op->val = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 122:	/* popcntb */
			do_popcnt(regs, op, regs->gpr[rd], 8);
			goto logical_done_nocc;

		case 124:	/* nor */
			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 154:	/* prtyw */
			do_prty(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 186:	/* prtyd */
			do_prty(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#ifdef CONFIG_PPC64
		case 252:	/* bpermd */
			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;
#endif
		case 284:	/* eqv */
			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			op->val = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 378:	/* popcntw */
			do_popcnt(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 412:	/* orc */
			op->val = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			op->val = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;
#ifdef CONFIG_PPC64
		case 506:	/* popcntd */
			do_popcnt(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#endif
		case 538:	/* cnttzw */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			val = (unsigned int) regs->gpr[rd];
			op->val = (val ? __builtin_ctz(val) : 32);
			goto logical_done;
#ifdef __powerpc64__
		case 570:	/* cnttzd */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			val = regs->gpr[rd];
			op->val = (val ? __builtin_ctzl(val) : 64);
			goto logical_done;
#endif
		case 922:	/* extsh */
			op->val = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			op->val = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			op->val = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				op->val = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				op->val = 0;
			goto logical_done;
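		/*
		 * slw/srw take a 6-bit shift count: counts of 32-63 mean
		 * "shift everything out", which is why sh >= 32 yields 0
		 * explicitly rather than relying on an undefined C shift.
		 */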
		case 792:	/* sraw */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> (sh < 32 ? sh : 31);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

		case 824:	/* srawi */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				op->val = regs->gpr[rd] << sh;
			else
				op->val = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				op->val = regs->gpr[rd] >> sh;
			else
				op->val = 0;
			goto logical_done;

		case 794:	/* srad */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> (sh < 64 ? sh : 63);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb | ((word & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

		case 890:	/* extswsli with sh_5 = 0 */
		case 891:	/* extswsli with sh_5 = 1 */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->type = COMPUTE + SETREG;
			sh = rb | ((word & 2) << 4);
			val = (signed int) regs->gpr[rd];
			if (sh)
				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
			else
				op->val = val;
			goto logical_done;

#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(word, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(word, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 1014:	/* dcbz */
			op->type = MKOP(CACHEOP, DCBZ, 0);
			op->ea = xform_ea(word, regs);
			return 0;
		}
		break;
	}

	/*
	 * Loads and stores.
	 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (word >> 20) & UPDATE;
	op->vsx_flags = 0;

	switch (opcode) {
	case 31:
		u = word & UPDATE;
		op->ea = xform_ea(word, regs);
		switch ((word >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;
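		/*
		 * For the LARX/STCX forms, analyse_instr() only records
		 * type, size and EA and returns 0: the access itself is
		 * left to the caller, since a real reservation must be
		 * obtained with an actual larx rather than a plain load.
		 */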
	switch (opcode) {
	case 31:
		u = word & UPDATE;
		op->ea = xform_ea(word, regs);
		switch ((word >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef CONFIG_PPC_HAS_LBARX_LHARX
		case 52:	/* lbarx */
			op->type = MKOP(LARX, 0, 1);
			break;

		case 694:	/* stbcx. */
			op->type = MKOP(STCX, 0, 1);
			break;

		case 116:	/* lharx */
			op->type = MKOP(LARX, 0, 2);
			break;

		case 726:	/* sthcx. */
			op->type = MKOP(STCX, 0, 2);
			break;
#endif
#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 276:	/* lqarx */
			if (!((rd & 1) || rd == ra || rd == rb))
				op->type = MKOP(LARX, 0, 16);
			break;

		case 182:	/* stqcx. */
			if (!(rd & 1))
				op->type = MKOP(STCX, 0, 16);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		/*
		 * Note: for the load/store vector element instructions,
		 * bits of the EA say which field of the VMX register to use.
		 */
		case 7:		/* lvebx */
			op->type = MKOP(LOAD_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 39:	/* lvehx */
			op->type = MKOP(LOAD_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 71:	/* lvewx */
			op->type = MKOP(LOAD_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 103:	/* lvx */
		case 359:	/* lvxl */
			op->type = MKOP(LOAD_VMX, 0, 16);
			op->element_size = 16;
			break;

		case 135:	/* stvebx */
			op->type = MKOP(STORE_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 167:	/* stvehx */
			op->type = MKOP(STORE_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 199:	/* stvewx */
			op->type = MKOP(STORE_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			op->type = MKOP(STORE_VMX, 0, 16);
			op->element_size = 16;
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;

		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
			break;
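		/*
		 * Illustration: lswi with NB = 7 becomes
		 * MKOP(LOAD_MULTI, 0, 7); emulate_loadstore()'s
		 * LOAD_MULTI loop below then copies 4 + 3 bytes into
		 * gpr[rd] and gpr[rd + 1], with the register number
		 * wrapping from 31 back to 0 as the string instructions
		 * require.
		 */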

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			op->type = MKOP(STORE_FP, u | FPCONV, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			op->type = MKOP(STORE_FP, u, 8);
			break;

#ifdef __powerpc64__
		case 791:	/* lfdpx */
			op->type = MKOP(LOAD_FP, 0, 16);
			break;

		case 855:	/* lfiwax */
			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
			break;

		case 887:	/* lfiwzx */
			op->type = MKOP(LOAD_FP, 0, 4);
			break;

		case 919:	/* stfdpx */
			op->type = MKOP(STORE_FP, 0, 16);
			break;

		case 983:	/* stfiwx */
			op->type = MKOP(STORE_FP, 0, 4);
			break;
#endif /* __powerpc64 */
#endif /* CONFIG_PPC_FPU */

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;

#ifdef CONFIG_VSX
		case 12:	/* lxsiwzx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			break;

		case 76:	/* lxsiwax */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
			op->element_size = 8;
			break;

		case 140:	/* stxsiwx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			break;

		case 268:	/* lxvx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;
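		/*
		 * Illustration: the 6-bit VSX register number is split
		 * across the instruction word: the low five bits sit in
		 * the usual RT/RS field and the sixth (high) bit is the
		 * word's LSB, hence rd | ((word & 1) << 5).  E.g. a
		 * target of vsr34 is encoded as T = 2 with TX = 1.
		 */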

		case 269:	/* lxvl */
		case 301: {	/* lxvll */
			int nb;
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->ea = ra ? regs->gpr[ra] : 0;
			nb = regs->gpr[rb] & 0xff;
			if (nb > 16)
				nb = 16;
			op->type = MKOP(LOAD_VSX, 0, nb);
			op->element_size = 16;
			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
				VSX_CHECK_VEC;
			break;
		}
		case 332:	/* lxvdsx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_SPLAT;
			break;

		case 333:	/* lxvpx */
			if (!cpu_has_feature(CPU_FTR_ARCH_31))
				goto unknown_opcode;
			op->reg = VSX_REGISTER_XTP(rd);
			op->type = MKOP(LOAD_VSX, 0, 32);
			op->element_size = 32;
			break;

		case 364:	/* lxvwsx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 4;
			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
			break;

		case 396:	/* stxvx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 397:	/* stxvl */
		case 429: {	/* stxvll */
			int nb;
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->ea = ra ? regs->gpr[ra] : 0;
			nb = regs->gpr[rb] & 0xff;
			if (nb > 16)
				nb = 16;
			op->type = MKOP(STORE_VSX, 0, nb);
			op->element_size = 16;
			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
				VSX_CHECK_VEC;
			break;
		}
		case 461:	/* stxvpx */
			if (!cpu_has_feature(CPU_FTR_ARCH_31))
				goto unknown_opcode;
			op->reg = VSX_REGISTER_XTP(rd);
			op->type = MKOP(STORE_VSX, 0, 32);
			op->element_size = 32;
			break;

		case 524:	/* lxsspx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV;
			break;

		case 588:	/* lxsdx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			break;

		case 652:	/* stxsspx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV;
			break;

		case 716:	/* stxsdx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 8);
			op->element_size = 8;
			break;

		case 780:	/* lxvw4x */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 4;
			break;

		case 781:	/* lxsibzx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 1);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 812:	/* lxvh8x */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 2;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 813:	/* lxsihzx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 2);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 844:	/* lxvd2x */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 8;
			break;
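		/*
		 * element_size records the access granularity so that
		 * the cross-endian emulation path can byte-swap each
		 * element independently rather than reversing the whole
		 * access.  E.g. lxvw4x (element_size = 4) swaps within
		 * each of the four words, while lxvd2x (element_size = 8)
		 * swaps within each doubleword.
		 */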

		case 876:	/* lxvb16x */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 1;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 908:	/* stxvw4x */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 4;
			break;

		case 909:	/* stxsibx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 1);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 940:	/* stxvh8x */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 2;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 941:	/* stxsihx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 2);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 972:	/* stxvd2x */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 8;
			break;

		case 1004:	/* stxvb16x */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 1;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(word, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(word, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(word, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(word, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(word, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(word, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(word, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(word, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(word, regs);
		break;
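	/*
	 * Illustration: lmw r29,8(r1) loads r29, r30 and r31, so the
	 * size above is 4 * (32 - 29) = 12 bytes.  The ra >= rd check
	 * rejects the invalid form in which the base register would be
	 * overwritten partway through the load.
	 */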

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
		op->ea = dform_ea(word, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(word, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		op->type = MKOP(STORE_FP, u | FPCONV, 4);
		op->ea = dform_ea(word, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(word, regs);
		break;
#endif

#ifdef __powerpc64__
	case 56:	/* lq */
		if (!((rd & 1) || (rd == ra)))
			op->type = MKOP(LOAD, 0, 16);
		op->ea = dqform_ea(word, regs);
		break;
#endif

#ifdef CONFIG_VSX
	case 57:	/* lfdp, lxsd, lxssp */
		op->ea = dsform_ea(word, regs);
		switch (word & 3) {
		case 0:		/* lfdp */
			if (rd & 1)
				break;		/* reg must be even */
			op->type = MKOP(LOAD_FP, 0, 16);
			break;
		case 2:		/* lxsd */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd + 32;
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;
		case 3:		/* lxssp */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd + 32;
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
			break;
		}
		break;
#endif /* CONFIG_VSX */

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(word, regs);
		switch (word & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;
#endif

#ifdef CONFIG_VSX
	case 6:
		if (!cpu_has_feature(CPU_FTR_ARCH_31))
			goto unknown_opcode;
		op->ea = dqform_ea(word, regs);
		op->reg = VSX_REGISTER_XTP(rd);
		op->element_size = 32;
		switch (word & 0xf) {
		case 0:		/* lxvp */
			op->type = MKOP(LOAD_VSX, 0, 32);
			break;
		case 1:		/* stxvp */
			op->type = MKOP(STORE_VSX, 0, 32);
			break;
		}
		break;

	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
		switch (word & 7) {
		case 0:		/* stfdp with LSB of DS field = 0 */
		case 4:		/* stfdp with LSB of DS field = 1 */
			op->ea = dsform_ea(word, regs);
			op->type = MKOP(STORE_FP, 0, 16);
			break;

		case 1:		/* lxv */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->ea = dqform_ea(word, regs);
			if (word & 8)
				op->reg = rd + 32;
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 2:		/* stxsd with LSB of DS field = 0 */
		case 6:		/* stxsd with LSB of DS field = 1 */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->ea = dsform_ea(word, regs);
			op->reg = rd + 32;
			op->type = MKOP(STORE_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 3:		/* stxssp with LSB of DS field = 0 */
		case 7:		/* stxssp with LSB of DS field = 1 */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->ea = dsform_ea(word, regs);
			op->reg = rd + 32;
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
			break;

		case 5:		/* stxv */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->ea = dqform_ea(word, regs);
			if (word & 8)
				op->reg = rd + 32;
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;
		}
		break;
#endif /* CONFIG_VSX */
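	/*
	 * Illustration of the sub-opcode decoding above: for primary
	 * opcode 61 the low bits of the displacement field select the
	 * operation, so word & 7 == 0 or 4 is the DS-form stfdp,
	 * 2/6 and 3/7 are the DS-form stxsd and stxssp, and 1 and 5
	 * are the DQ-form lxv and stxv; dsform_ea()/dqform_ea() mask
	 * those low bits back off when computing the displacement.
	 */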

#ifdef __powerpc64__
	case 62:	/* std[u], stq */
		op->ea = dsform_ea(word, regs);
		switch (word & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		case 2:		/* stq */
			if (!(rd & 1))
				op->type = MKOP(STORE, 0, 16);
			break;
		}
		break;
	case 1:		/* Prefixed instructions */
		if (!cpu_has_feature(CPU_FTR_ARCH_31))
			goto unknown_opcode;

		prefix_r = GET_PREFIX_R(word);
		ra = GET_PREFIX_RA(suffix);
		op->update_reg = ra;
		rd = (suffix >> 21) & 0x1f;
		op->reg = rd;
		op->val = regs->gpr[rd];

		suffixopcode = get_op(suffix);
		prefixtype = (word >> 24) & 0x3;
		switch (prefixtype) {
		case 0:		/* Type 00  Eight-Byte Load/Store */
			if (prefix_r && ra)
				break;
			op->ea = mlsd_8lsd_ea(word, suffix, regs);
			switch (suffixopcode) {
			case 41:	/* plwa */
				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
				break;
#ifdef CONFIG_VSX
			case 42:	/* plxsd */
				op->reg = rd + 32;
				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
				op->element_size = 8;
				op->vsx_flags = VSX_CHECK_VEC;
				break;
			case 43:	/* plxssp */
				op->reg = rd + 32;
				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
				op->element_size = 8;
				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
				break;
			case 46:	/* pstxsd */
				op->reg = rd + 32;
				op->type = MKOP(STORE_VSX, PREFIXED, 8);
				op->element_size = 8;
				op->vsx_flags = VSX_CHECK_VEC;
				break;
			case 47:	/* pstxssp */
				op->reg = rd + 32;
				op->type = MKOP(STORE_VSX, PREFIXED, 4);
				op->element_size = 8;
				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
				break;
			case 51:	/* plxv1 */
				op->reg += 32;
				fallthrough;
			case 50:	/* plxv0 */
				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
				op->element_size = 16;
				op->vsx_flags = VSX_CHECK_VEC;
				break;
			case 55:	/* pstxv1 */
				op->reg = rd + 32;
				fallthrough;
			case 54:	/* pstxv0 */
				op->type = MKOP(STORE_VSX, PREFIXED, 16);
				op->element_size = 16;
				op->vsx_flags = VSX_CHECK_VEC;
				break;
#endif /* CONFIG_VSX */
			case 56:	/* plq */
				op->type = MKOP(LOAD, PREFIXED, 16);
				break;
			case 57:	/* pld */
				op->type = MKOP(LOAD, PREFIXED, 8);
				break;
#ifdef CONFIG_VSX
			case 58:	/* plxvp */
				op->reg = VSX_REGISTER_XTP(rd);
				op->type = MKOP(LOAD_VSX, PREFIXED, 32);
				op->element_size = 32;
				break;
#endif /* CONFIG_VSX */
			case 60:	/* pstq */
				op->type = MKOP(STORE, PREFIXED, 16);
				break;
			case 61:	/* pstd */
				op->type = MKOP(STORE, PREFIXED, 8);
				break;
#ifdef CONFIG_VSX
			case 62:	/* pstxvp */
				op->reg = VSX_REGISTER_XTP(rd);
				op->type = MKOP(STORE_VSX, PREFIXED, 32);
				op->element_size = 32;
				break;
#endif /* CONFIG_VSX */
			}
			break;
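		/*
		 * Illustration: prefixtype is taken from (word >> 24) & 3.
		 * pld, for instance, is an 8LS:D prefixed load whose
		 * prefix is Type 00 and whose suffix carries primary
		 * opcode 57, so it is decoded in the Type 00 switch
		 * above; the non-prefixed opcode 57 seen earlier
		 * (lfdp/lxsd/lxssp) is a different instruction entirely.
		 */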
		case 1:		/* Type 01  Eight-Byte Register-to-Register */
			break;
		case 2:		/* Type 10  Modified Load/Store */
			if (prefix_r && ra)
				break;
			op->ea = mlsd_8lsd_ea(word, suffix, regs);
			switch (suffixopcode) {
			case 32:	/* plwz */
				op->type = MKOP(LOAD, PREFIXED, 4);
				break;
			case 34:	/* plbz */
				op->type = MKOP(LOAD, PREFIXED, 1);
				break;
			case 36:	/* pstw */
				op->type = MKOP(STORE, PREFIXED, 4);
				break;
			case 38:	/* pstb */
				op->type = MKOP(STORE, PREFIXED, 1);
				break;
			case 40:	/* plhz */
				op->type = MKOP(LOAD, PREFIXED, 2);
				break;
			case 42:	/* plha */
				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
				break;
			case 44:	/* psth */
				op->type = MKOP(STORE, PREFIXED, 2);
				break;
			case 48:	/* plfs */
				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
				break;
			case 50:	/* plfd */
				op->type = MKOP(LOAD_FP, PREFIXED, 8);
				break;
			case 52:	/* pstfs */
				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
				break;
			case 54:	/* pstfd */
				op->type = MKOP(STORE_FP, PREFIXED, 8);
				break;
			}
			break;
		case 3:		/* Type 11  Modified Register-to-Register */
			break;
		}
#endif /* __powerpc64__ */

	}

	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
		switch (GETTYPE(op->type)) {
		case LOAD:
			if (ra == rd)
				goto unknown_opcode;
			fallthrough;
		case STORE:
		case LOAD_FP:
		case STORE_FP:
			if (ra == 0)
				goto unknown_opcode;
		}
	}

#ifdef CONFIG_VSX
	if ((GETTYPE(op->type) == LOAD_VSX ||
	     GETTYPE(op->type) == STORE_VSX) &&
	    !cpu_has_feature(CPU_FTR_VSX)) {
		return -1;
	}
#endif /* CONFIG_VSX */

	return 0;

unknown_opcode:
	op->type = UNKNOWN;
	return 0;

logical_done:
	if (word & 1)
		set_cr0(regs, op);
logical_done_nocc:
	op->reg = ra;
	op->type |= SETREG;
	return 1;

arith_done:
	if (word & 1)
		set_cr0(regs, op);
compute_done:
	op->reg = rd;
	op->type |= SETREG;
	return 1;

priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;
}
EXPORT_SYMBOL_GPL(analyse_instr);
NOKPROBE_SYMBOL(analyse_instr);

/*
 * On PPC32 the stack pointer is always updated with stwu of r1, so an
 * emulated stwu could corrupt the exception frame that sits below the
 * kprobed function's stack.  Rather than performing the store here, we
 * only update gpr[1] and set a flag; the exception return code checks
 * the flag and does the real store safely, using an exception frame
 * trampoline pushed below the kprobed function's stack.
 */
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
	/*
	 * Warn if the flag is already set, since that would mean the
	 * previously deferred store is about to be lost.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}

static nokprobe_inline void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}

static nokprobe_inline void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}
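/*
 * For example, do_byterev() with size 4 turns 0x11223344 into
 * 0x44332211.  In emulate_loadstore() below, byte reversal is applied
 * when the BYTEREV flag and the cross-endian state disagree, so an
 * lwbrx emulated for a cross-endian task needs no swap at all: the
 * instruction's reversal and the endian mismatch cancel out.
 */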

/*
 * Emulate an instruction that can be executed just by updating
 * fields in *regs.
 */
void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
	unsigned long next_pc;

	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
	switch (GETTYPE(op->type)) {
	case COMPUTE:
		if (op->type & SETREG)
			regs->gpr[op->reg] = op->val;
		if (op->type & SETCC)
			regs->ccr = op->ccval;
		if (op->type & SETXER)
			regs->xer = op->xerval;
		break;

	case BRANCH:
		if (op->type & SETLK)
			regs->link = next_pc;
		if (op->type & BRTAKEN)
			next_pc = op->val;
		if (op->type & DECCTR)
			--regs->ctr;
		break;

	case BARRIER:
		switch (op->type & BARRIER_MASK) {
		case BARRIER_SYNC:
			mb();
			break;
		case BARRIER_ISYNC:
			isync();
			break;
		case BARRIER_EIEIO:
			eieio();
			break;
#ifdef CONFIG_PPC64
		case BARRIER_LWSYNC:
			asm volatile("lwsync" : : : "memory");
			break;
		case BARRIER_PTESYNC:
			asm volatile("ptesync" : : : "memory");
			break;
#endif
		}
		break;

	case MFSPR:
		switch (op->spr) {
		case SPRN_XER:
			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->gpr[op->reg] = regs->link;
			break;
		case SPRN_CTR:
			regs->gpr[op->reg] = regs->ctr;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	case MTSPR:
		switch (op->spr) {
		case SPRN_XER:
			regs->xer = op->val & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->link = op->val;
			break;
		case SPRN_CTR:
			regs->ctr = op->val;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	default:
		WARN_ON_ONCE(1);
	}
	regs_set_return_ip(regs, next_pc);
}
NOKPROBE_SYMBOL(emulate_update_regs);
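/*
 * Illustration: for "mflr r3", analyse_instr() returns 1 with an op of
 * type MFSPR, spr = SPRN_LR and reg = 3; emulate_update_regs() then
 * copies regs->link into gpr[3] and advances nip past the 4-byte
 * instruction (8 bytes for a prefixed instruction, via GETLENGTH()).
 */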

/*
 * Emulate a previously-analysed load or store instruction.
 * Return values are:
 * 0 = instruction emulated successfully
 * -EFAULT = address out of range or access faulted (regs->dar
 *	     contains the faulting address)
 * -EACCES = misaligned access, instruction requires alignment
 * -EINVAL = unknown operation in *op
 */
int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
{
	int err, size, type;
	int i, rd, nb;
	unsigned int cr;
	unsigned long val;
	unsigned long ea;
	bool cross_endian;

	err = 0;
	size = GETSIZE(op->type);
	type = GETTYPE(op->type);
	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
	ea = truncate_if_32bit(regs->msr, op->ea);

	switch (type) {
	case LARX:
		if (ea & (size - 1))
			return -EACCES;		/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		err = 0;
		val = 0;
		switch (size) {
#ifdef CONFIG_PPC_HAS_LBARX_LHARX
		case 1:
			__get_user_asmx(val, ea, err, "lbarx");
			break;
		case 2:
			__get_user_asmx(val, ea, err, "lharx");
			break;
#endif
		case 4:
			__get_user_asmx(val, ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, ea, err, "ldarx");
			break;
		case 16:
			err = do_lqarx(ea, &regs->gpr[op->reg]);
			break;
#endif
		default:
			return -EINVAL;
		}
		if (err) {
			regs->dar = ea;
			break;
		}
		if (size < 16)
			regs->gpr[op->reg] = val;
		break;

	case STCX:
		if (ea & (size - 1))
			return -EACCES;		/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		err = 0;
		switch (size) {
#ifdef __powerpc64__
		case 1:
			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
			break;
		case 2:
			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
			break;
#endif
		case 4:
			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
			break;
		case 16:
			err = do_stqcx(ea, regs->gpr[op->reg],
				       regs->gpr[op->reg + 1], &cr);
			break;
#endif
		default:
			return -EINVAL;
		}
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		else
			regs->dar = ea;
		break;
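	/*
	 * Illustration for the STCX case above: the executed
	 * st[bhwdq]cx. leaves LT/GT/EQ in the top three bits of cr,
	 * kept by (cr & 0xe0000000), while XER[SO] (0x80000000) is
	 * shifted down into CR0[SO] (0x10000000).  A successful store
	 * with SO clear therefore yields CR0 = 0b0010.
	 */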

	case LOAD:
#ifdef __powerpc64__
		if (size == 16) {
			err = emulate_lq(regs, ea, op->reg, cross_endian);
			break;
		}
#endif
		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
		if (!err) {
			if (op->type & SIGNEXT)
				do_signext(&regs->gpr[op->reg], size);
			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
				do_byterev(&regs->gpr[op->reg], size);
		}
		break;

#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		/*
		 * If the instruction is in userspace, we can emulate it even
		 * if the FP/VMX/VSX state is not live, because we have the
		 * state stored in the thread_struct.  If the instruction is
		 * in the kernel, we must not touch the state in the
		 * thread_struct.
		 */
		if (!user_mode(regs) && !(regs->msr & MSR_FP))
			return 0;
		err = do_fp_load(op, ea, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (!user_mode(regs) && !(regs->msr & MSR_VEC))
			return 0;
		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX: {
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than
		 * MSR_VSX when the target of the instruction is a vector
		 * register.
		 */
		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!user_mode(regs) && !(regs->msr & msrbit))
			return 0;
		err = do_vsx_load(op, ea, regs, cross_endian);
		break;
	}
#endif
	case LOAD_MULTI:
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		rd = op->reg;
		for (i = 0; i < size; i += 4) {
			unsigned int v32 = 0;

			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
			if (err)
				break;
			if (unlikely(cross_endian))
				v32 = byterev_4(v32);
			regs->gpr[rd] = v32;
			ea += 4;
			/* reg number wraps from 31 to 0 for lsw[ix] */
			rd = (rd + 1) & 0x1f;
		}
		break;

	case STORE:
#ifdef __powerpc64__
		if (size == 16) {
			err = emulate_stq(regs, ea, op->reg, cross_endian);
			break;
		}
#endif
		if ((op->type & UPDATE) && size == sizeof(long) &&
		    op->reg == 1 && op->update_reg == 1 && !user_mode(regs) &&
		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(ea, regs);
			break;
		}
		if (unlikely(cross_endian))
			do_byterev(&op->val, size);
		err = write_mem(op->val, ea, size, regs);
		break;

#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (!user_mode(regs) && !(regs->msr & MSR_FP))
			return 0;
		err = do_fp_store(op, ea, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (!user_mode(regs) && !(regs->msr & MSR_VEC))
			return 0;
		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX: {
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than
		 * MSR_VSX when the target of the instruction is a vector
		 * register.
		 */
		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!user_mode(regs) && !(regs->msr & msrbit))
			return 0;
		err = do_vsx_store(op, ea, regs, cross_endian);
		break;
	}
#endif
	case STORE_MULTI:
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		rd = op->reg;
		for (i = 0; i < size; i += 4) {
			unsigned int v32 = regs->gpr[rd];

			nb = size - i;
			if (nb > 4)
				nb = 4;
			if (unlikely(cross_endian))
				v32 = byterev_4(v32);
			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
			if (err)
				break;
			ea += 4;
			/* reg number wraps from 31 to 0 for stsw[ix] */
			rd = (rd + 1) & 0x1f;
		}
		break;

	default:
		return -EINVAL;
	}

	if (err)
		return err;

	if (op->type & UPDATE)
		regs->gpr[op->update_reg] = op->ea;

	return 0;
}
NOKPROBE_SYMBOL(emulate_loadstore);
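/*
 * A minimal usage sketch (mirroring what emulate_step() below does for
 * the load/store subset): a caller first decodes into an op, then
 * emulates and advances nip itself on success:
 *
 *	struct instruction_op op;
 *
 *	if (analyse_instr(&op, regs, instr) == 0 &&
 *	    OP_IS_LOAD_STORE(GETTYPE(op.type)) &&
 *	    emulate_loadstore(regs, &op) == 0)
 *		regs_set_return_ip(regs, truncate_if_32bit(regs->msr,
 *				regs->nip + GETLENGTH(op.type)));
 */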

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
{
	struct instruction_op op;
	int r, err, type;
	unsigned long val;
	unsigned long ea;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return r;
	if (r > 0) {
		emulate_update_regs(regs, &op);
		return 1;
	}

	err = 0;
	type = GETTYPE(op.type);

	if (OP_IS_LOAD_STORE(type)) {
		err = emulate_loadstore(regs, &op);
		if (err)
			return 0;
		goto instr_done;
	}

	switch (type) {
	case CACHEOP:
		ea = truncate_if_32bit(regs->msr, op.ea);
		if (!address_ok(regs, ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(ea, err, "dcbf");
			break;
		case DCBTST:
			if (op.reg == 0)
				prefetchw((void *) ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) ea);
			break;
		case ICBI:
			__cacheop_user_asmx(ea, err, "icbi");
			break;
		case DCBZ:
			err = emulate_dcbz(ea, regs);
			break;
		}
		if (err) {
			regs->dar = ea;
			return 0;
		}
		goto instr_done;

	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
		goto instr_done;

	case SYSCALL:	/* sc */
		/*
		 * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
		 * single step a system call instruction:
		 *
		 *   Successful completion for an instruction means that the
		 *   instruction caused no other interrupt.  Thus a Trace
		 *   interrupt never occurs for a System Call or System Call
		 *   Vectored instruction, or for a Trap instruction that
		 *   traps.
		 */
		return -1;
	case SYSCALL_VECTORED_0:	/* scv 0 */
		return -1;
	case RFI:
		return -1;
	}
	return 0;

instr_done:
	regs_set_return_ip(regs,
		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
	return 1;
}
NOKPROBE_SYMBOL(emulate_step);