/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64__ */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr,
					struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
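/*
 * Worked example (illustrative sketch only, not built): decoding the
 * D-form "lwz r9,8(r3)", encoded as 0x81230008, with dform_ea() above.
 */
#if 0
static unsigned long example_dform_ea(struct pt_regs *regs)
{
	unsigned int instr = 0x81230008;	/* lwz r9,8(r3) */

	/* ra == 3, displacement == 8, so the EA is r3 + 8 */
	return dform_ea(instr, regs);
}
#endif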
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}

static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}

static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb;	/* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
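/*
 * Worked example (illustrative sketch only, not built): the aligned
 * piece sizes read_mem_unaligned() uses on big-endian for an 8-byte
 * read at 0x1001 are 1, 2, 4 and 1 bytes, as computed below.
 */
#if 0
static void example_unaligned_pieces(void)
{
	unsigned long ea = 0x1001, c;
	int nb = 8;

	for (; nb > 0; nb -= c, ea += c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		pr_info("piece of %lu bytes at %#lx\n", c, ea);
	}
}
#endif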
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}

static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
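/*
 * Worked example (illustrative): a big-endian 4-byte write_mem() at
 * 0x2001 is split into pieces of 1, 2 and 1 bytes; the shift
 * "val >> (nb - c) * 8" peels the value off most-significant byte
 * first, so the bytes land in memory in architectural order.
 */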
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}

static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
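/*
 * Note on the unions above: for a 4-byte (single-precision) access the
 * value is staged in data.single.word so that, on either endianness,
 * the asm helper sees the 32-bit image at the offset it expects within
 * the 8-byte double slot.  For lvx/stvx the low 4 EA bits are ignored
 * by the hardware, hence the address check on the 16-byte-aligned base.
 */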
#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}

static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}

static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static int __kprobes trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
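/*
 * Worked example (illustrative): trap_compare() builds the 5-bit TO
 * condition set for tw/td: lt = 0x10, gt = 0x08, eq = 0x04, and the
 * unsigned variants 0x02/0x01.  "twgt" encodes TO == 0x08, so the
 * trap fires only when the signed greater-than bit comes back set.
 */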
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))

/*
 * Decode an instruction, and execute it if that can be done just by
 * modifying *regs (i.e. integer arithmetic and logical instructions,
 * branches, and barrier instructions).
 * Returns 1 if the instruction has been executed, or 0 if not.
 * Sets *op to indicate what the instruction does.
 */
int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
			    unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;

	op->type = COMPUTE;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
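	/*
	 * Worked example (illustrative): "b .-8" is encoded as
	 * 0x4bfffff8, so imm = 0x03fffff8; bit 25 is set, 0x04000000 is
	 * subtracted to give -8, and NIP is added since AA == 0.
	 */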
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			/* CR field 0 is the most-significant nibble of CCR */
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			goto instr_done;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (instr & 0x400) ? regs->ctr : regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER;
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			op->type = BARRIER;
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;

	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;
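	/*
	 * addic. (opcode 13, below) is the recording form of addic:
	 * same arithmetic, but CR0 is also set from the result via
	 * set_cr0().
	 */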
	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		break;	/* illegal instruction */
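	/*
	 * Worked example (illustrative): "rlwinm ra,rs,0,16,31" uses
	 * mb = 16, me = 31, so MASK32(16, 31) == 0x0000ffff and the
	 * result is rs & 0xffff, the classic clrlwi idiom.
	 */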
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			goto instr_done;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			goto instr_done;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000) ? 0x8002 : 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case SPRN_LR:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case SPRN_CTR:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			default:
				op->type = MFSPR;
				op->reg = rd;
				op->spr = spr;
				return 0;
			}
			break;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case SPRN_LR:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case SPRN_CTR:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			default:
				op->type = MTSPR;
				op->val = regs->gpr[rd];
				op->spr = spr;
				return 0;
			}
			break;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;
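		/*
		 * The subf/adde family below is emulated via
		 * add_with_carry(): RB - RA is computed as ~RA + RB + 1
		 * (two's complement), with subfe substituting XER[CA]
		 * for the final +1.
		 */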
		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;

/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;
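		/*
		 * Note on the 32-bit shifts: the shift count comes from
		 * the low six bits of RB, so values 32..63 are legal and
		 * must yield 0 (or all sign bits for sraw) explicitly,
		 * rather than relying on C's undefined behaviour for
		 * over-wide shifts.
		 */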
		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}

	/*
	 * Loads and stores.
	 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;

	switch (opcode) {
	case 31:
		u = instr & UPDATE;
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;
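		/*
		 * The larx/stcx pairs only record the operand size here;
		 * emulate_step() below performs the access with a real
		 * lwarx/stwcx. (or ldarx/stdcx.) so that the hardware
		 * reservation is honoured.
		 */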
#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(LOAD_VMX, 0, 16);
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 8);
			break;
#endif

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;
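		/*
		 * Byte-reversed stores pre-swap the value into op->val
		 * here so that emulate_step() can use the plain STORE
		 * path; byte-reversed loads are swapped after the read
		 * by do_byterev() instead.
		 */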
		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, u, 16);
			break;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, u, 16);
			break;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;
#endif

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;

	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		}
		break;
#endif /* __powerpc64__ */

	}
	return 0;
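	/*
	 * Common exits: logical_done and arith_done apply the Rc
	 * (record) bit by setting CR0 from the result, then reach
	 * instr_done, which advances NIP past the emulated instruction.
	 */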
 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

 trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;

#ifdef CONFIG_PPC_FPU
 fpunavail:
	op->type = INTERRUPT | 0x800;
	return 0;
#endif

#ifdef CONFIG_ALTIVEC
 vecunavail:
	op->type = INTERRUPT | 0xf20;
	return 0;
#endif

#ifdef CONFIG_VSX
 vsxunavail:
	op->type = INTERRUPT | 0xf40;
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);

/*
 * On PPC32 the stack pointer is always changed with a "stwu" on r1,
 * so an emulated stwu could corrupt the exception frame that sits
 * below the kprobed function's stack, where the exception-frame
 * trampoline is pushed.  We therefore only update gpr[1] here and do
 * not emulate the real store; the exception return code checks the
 * flag set below and performs the real store safely.
 */
static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check whether we would overflow the kernel stack.
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Check whether the flag is already set, since that means we
	 * would lose the previous value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}

static __kprobes void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}

static __kprobes void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}
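/*
 * Worked example (illustrative sketch only, not built): a 4-byte
 * byte-reverse turns the lwbrx image 0x78563412 back into 0x12345678.
 */
#if 0
static void example_byterev(void)
{
	unsigned long v = 0x78563412;

	do_byterev(&v, 4);
	/* v == 0x12345678 */
}
#endif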
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r, err, size;
	unsigned long val;
	unsigned int cr;
	int i, rd, nb;

	r = analyse_instr(&op, regs, instr);
	if (r != 0)
		return r;

	err = 0;
	size = GETSIZE(op.type);
	switch (op.type & INSTR_TYPE_MASK) {
	case CACHEOP:
		if (!address_ok(regs, op.ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(op.ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(op.ea, err, "dcbf");
			break;
		case DCBTST:
			if (op.reg == 0)
				prefetchw((void *) op.ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) op.ea);
			break;
		case ICBI:
			__cacheop_user_asmx(op.ea, err, "icbi");
			break;
		}
		if (err)
			return 0;
		goto instr_done;

	case LARX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		err = -EFAULT;
		if (!address_ok(regs, op.ea, size))
			goto ldst_done;
		err = 0;
		switch (size) {
		case 4:
			__get_user_asmx(val, op.ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, op.ea, err, "ldarx");
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->gpr[op.reg] = val;
		goto ldst_done;

	case STCX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		err = -EFAULT;
		if (!address_ok(regs, op.ea, size))
			goto ldst_done;
		err = 0;
		switch (size) {
		case 4:
			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		goto ldst_done;

	case LOAD:
		if (regs->msr & MSR_LE)
			return 0;
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (!err) {
			if (op.type & SIGNEXT)
				do_signext(&regs->gpr[op.reg], size);
			if (op.type & BYTEREV)
				do_byterev(&regs->gpr[op.reg], size);
		}
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
		else
			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case LOAD_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
			if (err)
				return 0;
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
			op.ea += 4;
			++rd;
		}
		goto instr_done;
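	/*
	 * Worked example (illustrative): for lswi with a 6-byte count,
	 * the loop above reads 4 + 2 bytes; the final 2 bytes are
	 * shifted left by 16 so they occupy the most-significant bytes
	 * of the last register's low word, matching the architected
	 * string-load layout.
	 */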
	case STORE:
		if (regs->msr & MSR_LE)
			return 0;
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(op.ea, regs);
			goto ldst_done;
		}
		err = write_mem(op.val, op.ea, size, regs);
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
		else
			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case STORE_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
			nb = size - i;
			if (nb > 4)
				nb = 4;
			else
				val >>= 32 - 8 * nb;
			err = write_mem(val, op.ea, nb, regs);
			if (err)
				return 0;
			op.ea += 4;
			++rd;
		}
		goto instr_done;

	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;

#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		return -1;
#endif
	}
	return 0;

 ldst_done:
	if (err)
		return 0;
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}
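/*
 * Usage sketch (illustrative only, not built): a single-step client
 * such as a kprobe handler would call emulate_step() on the probed
 * instruction and fall back to hardware single-stepping only when it
 * returns 0; a return of -1 means the instruction must not be stepped
 * at all.
 */
#if 0
static int example_try_emulate(struct pt_regs *regs, unsigned int instr)
{
	int ret = emulate_step(regs, instr);

	if (ret == 1)
		return 1;	/* done; regs->nip already advanced */
	if (ret == 0)
		return 0;	/* caller should single-step in hardware */
	return -1;		/* refuse: rfid / unsafe mtmsr[d] */
}
#endif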