sstep.c (350779a29f11f80ac66a8b38a7718ad30f003f18) → sstep.c (d120cdbce68c3739f94f733bec376460fb9cbc14)

The change is shown below as diff hunks: "-" lines are the old revision, "+" lines the new.
```diff
 /*
  * Single-step support.
  *
  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version

 [... 112 unchanged lines hidden ...]

 	int ra;
 	unsigned long ea;

 	ra = (instr >> 16) & 0x1f;
 	ea = (signed short) instr;		/* sign-extend */
 	if (ra)
 		ea += regs->gpr[ra];

-	return truncate_if_32bit(regs->msr, ea);
+	return ea;
 }

 #ifdef __powerpc64__
 /*
  * Calculate effective address for a DS-form instruction
  */
 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
 					       const struct pt_regs *regs)
 {
 	int ra;
 	unsigned long ea;

 	ra = (instr >> 16) & 0x1f;
 	ea = (signed short) (instr & ~3);	/* sign-extend */
 	if (ra)
 		ea += regs->gpr[ra];

-	return truncate_if_32bit(regs->msr, ea);
+	return ea;
 }

 /*
  * Calculate effective address for a DQ-form instruction
  */
 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
 					       const struct pt_regs *regs)
 {
 	int ra;
 	unsigned long ea;

 	ra = (instr >> 16) & 0x1f;
 	ea = (signed short) (instr & ~0xf);	/* sign-extend */
 	if (ra)
 		ea += regs->gpr[ra];

-	return truncate_if_32bit(regs->msr, ea);
+	return ea;
 }
 #endif /* __powerpc64 */

 /*
  * Calculate effective address for an X-form instruction
  */
 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
 					      const struct pt_regs *regs)
 {
 	int ra, rb;
 	unsigned long ea;

 	ra = (instr >> 16) & 0x1f;
 	rb = (instr >> 11) & 0x1f;
 	ea = regs->gpr[rb];
 	if (ra)
 		ea += regs->gpr[ra];

-	return truncate_if_32bit(regs->msr, ea);
+	return ea;
 }

 /*
  * Return the largest power of 2, not greater than sizeof(unsigned long),
  * such that x is a multiple of it.
  */
 static nokprobe_inline unsigned long max_align(unsigned long x)
 {

 [... 1592 unchanged lines hidden ...]
```
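All four helpers share the same recipe: take the displacement field (or GPR[RB] for X-form), sign-extend it, and add GPR[RA] unless RA is 0, which the ISA defines as a literal zero base. What these hunks remove is the per-helper call to truncate_if_32bit(), which masks an effective address down to 32 bits when the task is not in 64-bit mode. Below is a minimal userspace sketch of both pieces; `struct pt_regs_sketch`, `MSR_64BIT_SKETCH`, and the `_sketch` helpers are stand-ins invented for illustration, so check the real MSR bit layout against the kernel headers:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel types; illustration only. */
struct pt_regs_sketch { uint64_t gpr[32]; uint64_t msr; };
#define MSR_64BIT_SKETCH (1ULL << 63)	/* assumed: MSR[SF] selects 64-bit mode */

/* In 32-bit mode the high 32 bits of an effective address are ignored. */
static uint64_t truncate_if_32bit_sketch(uint64_t msr, uint64_t ea)
{
	if (!(msr & MSR_64BIT_SKETCH))
		ea &= 0xffffffffULL;
	return ea;
}

/* D-form: EA = (RA ? GPR[RA] : 0) + sign_extend(d16) */
static uint64_t dform_ea_sketch(uint32_t instr, const struct pt_regs_sketch *regs)
{
	int ra = (instr >> 16) & 0x1f;
	uint64_t ea = (int16_t)instr;	/* sign-extend the low 16 bits */

	if (ra)
		ea += regs->gpr[ra];
	return ea;	/* truncation is now the caller's job */
}

int main(void)
{
	struct pt_regs_sketch regs = { .gpr = { [3] = 0x123456789ULL }, .msr = 0 };
	uint32_t instr = (3u << 16) | 4;	/* RA = r3, d = +4 */
	uint64_t ea = dform_ea_sketch(instr, &regs);

	printf("raw EA %llx, EA in 32-bit mode %llx\n", (unsigned long long)ea,
	       (unsigned long long)truncate_if_32bit_sketch(regs.msr, ea));
	return 0;	/* prints: raw EA 12345678d, EA in 32-bit mode 2345678d */
}
```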
```diff
 	case 534:	/* lwbrx */
 		op->type = MKOP(LOAD, BYTEREV, 4);
 		break;

 	case 597:	/* lswi */
 		if (rb == 0)
 			rb = 32;	/* # bytes to load */
 		op->type = MKOP(LOAD_MULTI, 0, rb);
-		op->ea = 0;
-		if (ra)
-			op->ea = truncate_if_32bit(regs->msr,
-						   regs->gpr[ra]);
+		op->ea = ra ? regs->gpr[ra] : 0;
 		break;

 #ifdef CONFIG_PPC_FPU
 	case 535:	/* lfsx */
 	case 567:	/* lfsux */
 		op->type = MKOP(LOAD_FP, u, 4);
 		break;

 [... 28 unchanged lines hidden ...]

 		op->type = MKOP(STORE, BYTEREV, 4);
 		op->val = byterev_4(regs->gpr[rd]);
 		break;

 	case 725:
 		if (rb == 0)
 			rb = 32;	/* # bytes to store */
 		op->type = MKOP(STORE_MULTI, 0, rb);
-		op->ea = 0;
-		if (ra)
-			op->ea = truncate_if_32bit(regs->msr,
-						   regs->gpr[ra]);
+		op->ea = ra ? regs->gpr[ra] : 0;
```
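Opcode 725 is stswi, the store twin of lswi above; both pack their operation class and byte count into op->type with MKOP(), and emulate_step() later recovers the count with GETSIZE(). A sketch of that packing, assuming a size-in-the-upper-bits layout; the enum values and field mask here are illustrative, not the kernel's:

```c
#include <stdio.h>

/* Illustrative op->type packing; values and mask widths are assumed. */
enum optype_sketch { LOAD_MULTI_SKETCH = 1, STORE_MULTI_SKETCH = 2 };
#define SIZE_SKETCH(n)		((n) << 8)
#define MKOP_SKETCH(t, f, s)	((t) | (f) | SIZE_SKETCH(s))
#define GETSIZE_SKETCH(w)	((w) >> 8)
#define TYPE_MASK_SKETCH	0xff

int main(void)
{
	int rb = 0;		/* lswi/stswi: RB == 0 encodes 32 bytes */
	if (rb == 0)
		rb = 32;

	unsigned int t = MKOP_SKETCH(STORE_MULTI_SKETCH, 0, rb);
	printf("class=%u bytes=%u\n",
	       t & TYPE_MASK_SKETCH, GETSIZE_SKETCH(t));	/* class=2 bytes=32 */
	return 0;
}
```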
```diff
 		break;

 	case 790:	/* lhbrx */
 		op->type = MKOP(LOAD, BYTEREV, 2);
 		break;

 	case 918:	/* sthbrx */
 		op->type = MKOP(STORE, BYTEREV, 2);

 [... 550 unchanged lines hidden ...]

  * loads and stores, and a few other instructions.
  * Returns 1 if the step was emulated, 0 if not,
  * or -1 if the instruction is one that should not be stepped,
  * such as an rfid, or a mtmsrd that would clear MSR_RI.
  */
 int emulate_step(struct pt_regs *regs, unsigned int instr)
 {
 	struct instruction_op op;
-	int r, err, size;
+	int r, err, size, type;
 	unsigned long val;
 	unsigned int cr;
 	int i, rd, nb;
+	unsigned long ea;

 	r = analyse_instr(&op, regs, instr);
 	if (r < 0)
 		return r;
 	if (r > 0) {
 		emulate_update_regs(regs, &op);
 		return 1;
 	}

 	err = 0;
 	size = GETSIZE(op.type);
-	switch (op.type & INSTR_TYPE_MASK) {
+	type = op.type & INSTR_TYPE_MASK;
+
+	ea = op.ea;
+	if (OP_IS_LOAD_STORE(type) || type == CACHEOP)
+		ea = truncate_if_32bit(regs->msr, op.ea);
+
+	switch (type) {
```
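This is the heart of the commit: rather than truncating inside every EA helper, emulate_step() now derives one local ea and truncates it exactly once, and only for operation classes that actually touch memory; for something like MFMSR the op.ea field is meaningless and is left alone. A self-contained sketch of the compute-once pattern; OP_IS_LOAD_STORE() is a real sstep.h predicate whose definition is not shown in this diff, so the stand-in below is assumed:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch: truncate the EA once, up front, instead of in every helper. */
enum optype_sketch { MFMSR_SKETCH, LOAD_SKETCH, STORE_SKETCH, CACHEOP_SKETCH };

static int op_is_load_store_sketch(enum optype_sketch t)
{
	return t == LOAD_SKETCH || t == STORE_SKETCH;	/* stand-in predicate */
}

static uint64_t truncate_if_32bit_sketch(int msr_is_64bit, uint64_t ea)
{
	return msr_is_64bit ? ea : (ea & 0xffffffffULL);
}

static uint64_t effective_address(enum optype_sketch t, uint64_t raw_ea,
				  int msr_is_64bit)
{
	uint64_t ea = raw_ea;

	if (op_is_load_store_sketch(t) || t == CACHEOP_SKETCH)
		ea = truncate_if_32bit_sketch(msr_is_64bit, ea);	/* one site */
	return ea;
}

int main(void)
{
	uint64_t raw = 0x123456789aULL;

	printf("LOAD, 32-bit task:   %llx\n",
	       (unsigned long long)effective_address(LOAD_SKETCH, raw, 0));
	printf("MFMSR, ea untouched: %llx\n",
	       (unsigned long long)effective_address(MFMSR_SKETCH, raw, 0));
	return 0;
}
```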
```diff
 	case CACHEOP:
-		if (!address_ok(regs, op.ea, 8))
+		if (!address_ok(regs, ea, 8))
 			return 0;
 		switch (op.type & CACHEOP_MASK) {
 		case DCBST:
-			__cacheop_user_asmx(op.ea, err, "dcbst");
+			__cacheop_user_asmx(ea, err, "dcbst");
 			break;
 		case DCBF:
-			__cacheop_user_asmx(op.ea, err, "dcbf");
+			__cacheop_user_asmx(ea, err, "dcbf");
 			break;
 		case DCBTST:
 			if (op.reg == 0)
-				prefetchw((void *) op.ea);
+				prefetchw((void *) ea);
 			break;
 		case DCBT:
 			if (op.reg == 0)
-				prefetch((void *) op.ea);
+				prefetch((void *) ea);
 			break;
 		case ICBI:
-			__cacheop_user_asmx(op.ea, err, "icbi");
+			__cacheop_user_asmx(ea, err, "icbi");
 			break;
 		}
 		if (err)
 			return 0;
 		goto instr_done;

 	case LARX:
```
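For cache ops the emulator simply re-issues the same instruction on the probed EA through __cacheop_user_asmx(), a wrapper defined earlier in the file (outside this diff) that adds an exception-table entry so a bad user address fails gracefully, while dcbt/dcbtst with a zero hint field degrade to the kernel's generic prefetch()/prefetchw(). For flavor, here is what issuing one such op looks like in plain inline asm; this is illustrative and is not the kernel's macro:

```c
/* Illustrative only: flush one data-cache line on PowerPC. The kernel's
 * __cacheop_user_asmx() performs the same dispatch but wraps the
 * instruction in an exception-table entry so a faulting EA sets err
 * instead of crashing. */
static inline void dcbf_line_sketch(const void *ea)
{
#if defined(__powerpc__) || defined(__powerpc64__)
	__asm__ __volatile__("dcbf 0,%0" : : "r"(ea) : "memory");
#else
	(void)ea;	/* no-op sketch on other architectures */
#endif
}
```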
```diff
-		if (op.ea & (size - 1))
+		if (ea & (size - 1))
 			break;		/* can't handle misaligned */
-		if (!address_ok(regs, op.ea, size))
+		if (!address_ok(regs, ea, size))
 			return 0;
 		err = 0;
 		switch (size) {
 #ifdef __powerpc64__
 		case 1:
-			__get_user_asmx(val, op.ea, err, "lbarx");
+			__get_user_asmx(val, ea, err, "lbarx");
 			break;
 		case 2:
-			__get_user_asmx(val, op.ea, err, "lharx");
+			__get_user_asmx(val, ea, err, "lharx");
 			break;
 #endif
 		case 4:
-			__get_user_asmx(val, op.ea, err, "lwarx");
+			__get_user_asmx(val, ea, err, "lwarx");
 			break;
 #ifdef __powerpc64__
 		case 8:
-			__get_user_asmx(val, op.ea, err, "ldarx");
+			__get_user_asmx(val, ea, err, "ldarx");
 			break;
 		case 16:
-			err = do_lqarx(op.ea, &regs->gpr[op.reg]);
+			err = do_lqarx(ea, &regs->gpr[op.reg]);
 			goto ldst_done;
 #endif
 		default:
 			return 0;
 		}
 		if (!err)
 			regs->gpr[op.reg] = val;
 		goto ldst_done;

 	case STCX:
```
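larx/stcx. reservations require natural alignment, so both the LARX and STCX cases bail out early using the standard power-of-two test: for a size-byte access, the low log2(size) bits of the EA must all be zero. In miniature:

```c
#include <stdbool.h>
#include <stdint.h>

/* Natural-alignment test, as used by the LARX/STCX cases: size is a
 * power of two, so any set low-order bit means the EA is misaligned. */
static bool naturally_aligned(uint64_t ea, unsigned int size)
{
	return (ea & (size - 1)) == 0;
}

/* naturally_aligned(0x1004, 4) -> true; naturally_aligned(0x1006, 4) -> false */
```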
```diff
-		if (op.ea & (size - 1))
+		if (ea & (size - 1))
 			break;		/* can't handle misaligned */
-		if (!address_ok(regs, op.ea, size))
+		if (!address_ok(regs, ea, size))
 			return 0;
 		err = 0;
 		switch (size) {
 #ifdef __powerpc64__
 		case 1:
-			__put_user_asmx(op.val, op.ea, err, "stbcx.", cr);
+			__put_user_asmx(op.val, ea, err, "stbcx.", cr);
 			break;
 		case 2:
-			__put_user_asmx(op.val, op.ea, err, "stbcx.", cr);
+			__put_user_asmx(op.val, ea, err, "stbcx.", cr);
 			break;
 #endif
 		case 4:
-			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
+			__put_user_asmx(op.val, ea, err, "stwcx.", cr);
 			break;
 #ifdef __powerpc64__
 		case 8:
-			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
+			__put_user_asmx(op.val, ea, err, "stdcx.", cr);
 			break;
 		case 16:
-			err = do_stqcx(op.ea, regs->gpr[op.reg],
+			err = do_stqcx(ea, regs->gpr[op.reg],
 				       regs->gpr[op.reg + 1], &cr);
 			break;
 #endif
 		default:
 			return 0;
 		}
 		if (!err)
 			regs->ccr = (regs->ccr & 0x0fffffff) |
 				(cr & 0xe0000000) |
 				((regs->xer >> 3) & 0x10000000);
 		goto ldst_done;

 	case LOAD:
 #ifdef __powerpc64__
 		if (size == 16) {
```
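STCX must run the real store-conditional, because only the hardware knows whether the reservation from the paired larx survived; the resulting CR0 bits (plus XER[SO]) are then merged back into the saved regs->ccr. One oddity worth flagging: in both revisions the 2-byte case emits "stbcx." again, where the halfword mnemonic is sthcx., so this looks like a pre-existing copy-paste bug that the commit merely carries along. For reference, the reservation pair this case preserves is the classic retry loop below (illustrative, PowerPC-only):

```c
#include <stdint.h>

/* Illustrative atomic exchange built on the larx/stcx. pair; this is
 * the primitive whose semantics emulate_step() has to preserve. */
static inline uint32_t xchg32_sketch(volatile uint32_t *p, uint32_t newval)
{
	uint32_t old;

#if defined(__powerpc__) || defined(__powerpc64__)
	__asm__ __volatile__(
		"1:	lwarx	%0,0,%2\n"	/* load word and reserve */
		"	stwcx.	%3,0,%2\n"	/* store iff reservation intact */
		"	bne-	1b\n"		/* CR0[EQ] clear: lost it, retry */
		: "=&r"(old), "+m"(*p)
		: "r"(p), "r"(newval)
		: "cc", "memory");
#else
	old = *p;	/* non-PPC fallback so the sketch still compiles */
	*p = newval;
#endif
	return old;
}
```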
```diff
-			err = emulate_lq(regs, op.ea, op.reg);
+			err = emulate_lq(regs, ea, op.reg);
 			goto ldst_done;
 		}
 #endif
-		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
+		err = read_mem(&regs->gpr[op.reg], ea, size, regs);
 		if (!err) {
 			if (op.type & SIGNEXT)
 				do_signext(&regs->gpr[op.reg], size);
 			if (op.type & BYTEREV)
 				do_byterev(&regs->gpr[op.reg], size);
 		}
 		goto ldst_done;

 #ifdef CONFIG_PPC_FPU
 	case LOAD_FP:
 		if (!(regs->msr & MSR_FP))
 			return 0;
 		if (size == 4)
```
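A plain LOAD may still owe the destination register some post-processing: lha/lwa-style loads are tagged SIGNEXT and lhbrx/lwbrx are tagged BYTEREV, and the two fixups are applied in place. Neither helper appears in this diff; plausible userspace equivalents (GCC/Clang bswap builtins assumed):

```c
#include <stdint.h>

/* Sketches of the post-load fixups; the kernel helpers serving the
 * same purpose are not shown in this diff, so these are equivalents
 * in spirit, not copies. */
static void do_signext_sketch(uint64_t *valp, int size)
{
	if (size == 2)
		*valp = (uint64_t)(int64_t)(int16_t)*valp;
	else if (size == 4)
		*valp = (uint64_t)(int64_t)(int32_t)*valp;
}

static void do_byterev_sketch(uint64_t *valp, int size)
{
	if (size == 2)
		*valp = __builtin_bswap16((uint16_t)*valp);
	else if (size == 4)
		*valp = __builtin_bswap32((uint32_t)*valp);
	else if (size == 8)
		*valp = __builtin_bswap64(*valp);
}
```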
```diff
-			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
+			err = do_fp_load(op.reg, do_lfs, ea, size, regs);
 		else
-			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
+			err = do_fp_load(op.reg, do_lfd, ea, size, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
 		if (!(regs->msr & MSR_VEC))
 			return 0;
-		err = do_vec_load(op.reg, do_lvx, op.ea, regs);
+		err = do_vec_load(op.reg, do_lvx, ea, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case LOAD_VSX: {
 		char mem[16];
 		union vsx_reg buf;
 		unsigned long msrbit = MSR_VSX;

 		/*
 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
 		 * when the target of the instruction is a vector register.
 		 */
 		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
 			msrbit = MSR_VEC;
 		if (!(regs->msr & msrbit))
 			return 0;
-		if (!address_ok(regs, op.ea, size) ||
-		    __copy_from_user(mem, (void __user *)op.ea, size))
+		if (!address_ok(regs, ea, size) ||
+		    __copy_from_user(mem, (void __user *)ea, size))
 			return 0;

 		emulate_vsx_load(&op, &buf, mem);
 		load_vsrn(op.reg, &buf);
 		goto ldst_done;
 	}
 #endif
 	case LOAD_MULTI:
 		if (regs->msr & MSR_LE)
 			return 0;
 		rd = op.reg;
 		for (i = 0; i < size; i += 4) {
 			nb = size - i;
 			if (nb > 4)
 				nb = 4;
```
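The MSR gate for VSX is subtle because the 64 VSX registers are an overlay: VSR 0-31 share storage with the FP registers and VSR 32-63 with the VMX registers, so an instruction tagged VSX_CHECK_VEC that targets the upper half is gated on MSR_VEC rather than MSR_VSX. The check, lifted out as a sketch; the bit positions are illustrative, so use the kernel's MSR_* constants in real code:

```c
#include <stdbool.h>
#include <stdint.h>

#define MSR_VSX_SKETCH		(1ULL << 23)	/* illustrative bit positions */
#define MSR_VEC_SKETCH		(1ULL << 25)
#define VSX_CHECK_VEC_SKETCH	1

/* VSR 0-31 alias the FP regs, VSR 32-63 alias the VMX regs, so which
 * facility bit must be enabled depends on the target register number. */
static bool vsx_facility_ok_sketch(uint64_t msr, int reg, int vsx_flags)
{
	uint64_t msrbit = MSR_VSX_SKETCH;

	if (reg >= 32 && (vsx_flags & VSX_CHECK_VEC_SKETCH))
		msrbit = MSR_VEC_SKETCH;
	return (msr & msrbit) != 0;
}
```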
```diff
-			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
+			err = read_mem(&regs->gpr[rd], ea, nb, regs);
 			if (err)
 				return 0;
 			if (nb < 4)	/* left-justify last bytes */
 				regs->gpr[rd] <<= 32 - 8 * nb;
-			op.ea += 4;
+			ea += 4;
 			++rd;
 		}
 		goto instr_done;

 	case STORE:
 #ifdef __powerpc64__
 		if (size == 16) {
```
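LOAD_MULTI walks the byte stream a word at a time, and a final fragment of 1-3 bytes is left-justified in its 32-bit register image, matching lmw/lswi semantics on a big-endian guest (the MSR_LE bail-out above is why the loop can assume big-endian). The hunk also shows a quiet benefit of the new local: ea += 4 advances a scratch copy instead of mutating op.ea. A worked sketch of the justification rule:

```c
#include <stdint.h>
#include <stdio.h>

/* Worked sketch of the lswi tail rule: 6 bytes fill one full word plus
 * a 2-byte remainder that lands left-justified in the next register. */
int main(void)
{
	const uint8_t stream[6] = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };
	uint64_t gpr[2] = { 0, 0 };
	int size = 6, rd = 0;

	for (int i = 0; i < size; i += 4) {
		int nb = size - i > 4 ? 4 : size - i;
		uint32_t w = 0;

		for (int b = 0; b < nb; b++)		/* read_mem() stand-in, */
			w = (w << 8) | stream[i + b];	/* big-endian byte order */
		gpr[rd] = w;
		if (nb < 4)
			gpr[rd] <<= 32 - 8 * nb;	/* left-justify last bytes */
		++rd;
	}
	printf("gpr0=%08llx gpr1=%08llx\n",		/* aabbccdd / eeff0000 */
	       (unsigned long long)gpr[0], (unsigned long long)gpr[1]);
	return 0;
}
```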
```diff
-			err = emulate_stq(regs, op.ea, op.reg);
+			err = emulate_stq(regs, ea, op.reg);
 			goto ldst_done;
 		}
 #endif
 		if ((op.type & UPDATE) && size == sizeof(long) &&
 		    op.reg == 1 && op.update_reg == 1 &&
 		    !(regs->msr & MSR_PR) &&
-		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
-			err = handle_stack_update(op.ea, regs);
+		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
+			err = handle_stack_update(ea, regs);
 			goto ldst_done;
 		}
-		err = write_mem(op.val, op.ea, size, regs);
+		err = write_mem(op.val, ea, size, regs);
 		goto ldst_done;

 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
 		if (!(regs->msr & MSR_FP))
 			return 0;
 		if (size == 4)
```
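The STORE case carves out one special situation: a kernel-mode store-with-update through r1 that lands within STACK_INT_FRAME_SIZE of the current stack pointer, i.e. a stwu/stdu stack push near the very frame the emulator is executing on, which handle_stack_update() (not shown in this diff) handles without corrupting that frame. Architecturally a store-with-update is just store-then-writeback; a minimal sketch, assuming a flat byte-array memory:

```c
#include <stdint.h>

/* Minimal sketch of "stdu rS,D(rA)": store rS at GPR[rA]+D, then write
 * the new EA back into rA. With rS == rA == r1 this is the PPC stack
 * push. Bounds and permission checks are elided. */
static void stdu_sketch(uint64_t gpr[32], int rs, int ra, int64_t d,
			uint8_t *mem)
{
	uint64_t ea = gpr[ra] + (uint64_t)d;

	for (int i = 0; i < 8; i++)	/* big-endian register image */
		mem[ea + i] = (uint8_t)(gpr[rs] >> (56 - 8 * i));
	gpr[ra] = ea;			/* the "update" half */
}
```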
```diff
-			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
+			err = do_fp_store(op.reg, do_stfs, ea, size, regs);
 		else
-			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
+			err = do_fp_store(op.reg, do_stfd, ea, size, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
 		if (!(regs->msr & MSR_VEC))
 			return 0;
-		err = do_vec_store(op.reg, do_stvx, op.ea, regs);
+		err = do_vec_store(op.reg, do_stvx, ea, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case STORE_VSX: {
 		char mem[16];
 		union vsx_reg buf;
 		unsigned long msrbit = MSR_VSX;

 		/*
 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
 		 * when the target of the instruction is a vector register.
 		 */
 		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
 			msrbit = MSR_VEC;
 		if (!(regs->msr & msrbit))
 			return 0;
-		if (!address_ok(regs, op.ea, size))
+		if (!address_ok(regs, ea, size))
 			return 0;

 		store_vsrn(op.reg, &buf);
 		emulate_vsx_store(&op, &buf, mem);
-		if (__copy_to_user((void __user *)op.ea, mem, size))
+		if (__copy_to_user((void __user *)ea, mem, size))
```
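The VSX store is staged: store_vsrn() snapshots the register into a scratch union, emulate_vsx_store() lays the bytes out in the memory order the particular instruction demands, and only then does one __copy_to_user() move the staged image out. The shape of that pattern, with every name below a stand-in rather than the kernel API:

```c
#include <stdint.h>
#include <string.h>

/* Shape-only sketch of the staged VSX store; all names are stand-ins. */
union vsx_reg_sketch { uint8_t b[16]; uint64_t d[2]; };

static int staged_store_sketch(const union vsx_reg_sketch *reg,
			       void (*layout)(const union vsx_reg_sketch *,
					      uint8_t *),
			       uint8_t *dest)
{
	uint8_t mem[16];

	layout(reg, mem);		/* emulate_vsx_store() stand-in */
	memcpy(dest, mem, sizeof(mem));	/* __copy_to_user() stand-in */
	return 0;
}
```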
```diff
 			return 0;
 		goto ldst_done;
 	}
 #endif
 	case STORE_MULTI:
 		if (regs->msr & MSR_LE)
 			return 0;
 		rd = op.reg;
 		for (i = 0; i < size; i += 4) {
 			val = regs->gpr[rd];
 			nb = size - i;
 			if (nb > 4)
 				nb = 4;
 			else
 				val >>= 32 - 8 * nb;
-			err = write_mem(val, op.ea, nb, regs);
+			err = write_mem(val, ea, nb, regs);
 			if (err)
 				return 0;
-			op.ea += 4;
+			ea += 4;
 			++rd;
 		}
 		goto instr_done;

 	case MFMSR:
 		regs->gpr[op.reg] = regs->msr & MSR_MASK;
 		goto instr_done;

 [... 47 unchanged lines hidden ...]
```
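Per the comment at the top of emulate_step(), callers see a three-way contract: 1 means the instruction's effects, including the NIP advance, are already in *regs; 0 means the emulator declined and the caller should fall back to hardware single-stepping; -1 means the instruction (an rfid, or a mtmsrd that would clear MSR_RI) must not be stepped at all. A caller sketch, assuming kernel context; arrange_hw_single_step() is a hypothetical fallback, not kernel API:

```c
/* Sketch of a client honoring emulate_step()'s return contract. */
static int try_emulate_sketch(struct pt_regs *regs, unsigned int instr)
{
	int ret = emulate_step(regs, instr);

	if (ret > 0)
		return 0;	/* emulated; regs (incl. NIP) already updated */
	if (ret < 0)
		return -EPERM;	/* must not be stepped (rfid, mtmsrd ...) */

	return arrange_hw_single_step(regs);	/* hypothetical fallback */
}
```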