Lines matching 0x20030000
91 #define REG_LO(r) (bpf2arc[(r)][0])
110 ZZ_4_byte = 0,
126 AA_none = 0,
134 X_zero = 0,
140 CC_always = 0, /* condition is true all the time */
155 #define IN_U6_RANGE(x) ((x) <= (0x40 - 1) && (x) >= 0)
156 #define IN_S9_RANGE(x) ((x) <= (0x100 - 1) && (x) >= -0x100)
157 #define IN_S12_RANGE(x) ((x) <= (0x800 - 1) && (x) >= -0x800)
158 #define IN_S21_RANGE(x) ((x) <= (0x100000 - 1) && (x) >= -0x100000)
159 #define IN_S25_RANGE(x) ((x) <= (0x1000000 - 1) && (x) >= -0x1000000)
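For context, a minimal self-check (not from the kernel source; the assert harness is illustrative) of what these range macros accept, shown for the s12 case. Values outside the range need a different, longer encoding.

    #include <assert.h>

    /* Copied from above; accepts exactly the two's-complement s12 range. */
    #define IN_S12_RANGE(x) ((x) <= (0x800 - 1) && (x) >= -0x800)

    int main(void)
    {
            assert( IN_S12_RANGE(2047));    /* largest s12:  0x7ff  */
            assert( IN_S12_RANGE(-2048));   /* smallest s12: -0x800 */
            assert(!IN_S12_RANGE(2048));    /* too big for 12 bits  */
            assert(!IN_S12_RANGE(-2049));
            return 0;
    }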
162 #define OP_A(x) ((x) & 0x03f)
163 #define OP_B(x) ((((x) & 0x07) << 24) | (((x) & 0x38) << 9))
164 #define OP_C(x) (((x) & 0x03f) << 6)
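A small demonstration (illustrative harness, not kernel code) of how these operand encoders assemble an instruction word; OPC_ADD is taken from further down this listing.

    #include <stdint.h>
    #include <stdio.h>

    /* Macros copied from above; OPC_ADD appears later in the listing. */
    #define OP_A(x) ((x) & 0x03f)
    #define OP_B(x) ((((x) & 0x07) << 24) | (((x) & 0x38) << 9))
    #define OP_C(x) (((x) & 0x03f) << 6)
    #define OPC_ADD 0x20000000

    int main(void)
    {
            /* "add r1, r2, r3": B's 6-bit register number is split,
             * low 3 bits at 26:24 ("bbb") and high 3 bits at 14:12
             * ("BBB"), matching the encoding comments below. */
            uint32_t insn = OPC_ADD | OP_B(2) | OP_C(3) | OP_A(1);

            printf("0x%08x\n", insn);       /* prints 0x220000c1 */
            return 0;
    }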
172 * 0010_0bbb 0000_1010 0BBB_cccc cc00_0000
177 #define OPC_MOV 0x200a0000
182 * 0010_0bbb 1000_1010 0BBB_ssss ssSS_SSSS
187 #define OPC_MOVI 0x208a0000
188 #define MOVI_S12(x) ((((x) & 0xfc0) >> 6) | (((x) & 0x3f) << 6))
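A quick sanity check (illustrative, not kernel code) of the s12 half-swap above:

    #include <assert.h>

    /* Copied from above: the s12 immediate is stored "middle-endian",
     * low 6 bits in instruction bits 11:6 ("ssss_ss") and high 6 bits
     * in bits 5:0 ("SS_SSSS"), hence the swap. */
    #define MOVI_S12(x) ((((x) & 0xfc0) >> 6) | (((x) & 0x3f) << 6))

    int main(void)
    {
            /* The 12-bit two's-complement representation of -5 is 0xffb. */
            assert(MOVI_S12(-5 & 0xfff) == 0xeff);
            return 0;
    }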
194 * 0010_0bbb 1100_1010 0BBB_cccc cciq_qqqq
202 #define OPC_MOV_CC 0x20ca0000
209 * 0010_0bbb 0010_1111 0BBB_cccc cc00_0101
214 #define OPC_SEXB 0x202f0005
219 * 0010_0bbb 0010_1111 0BBB_cccc cc00_0110
224 #define OPC_SEXH 0x202f0006
239 #define OPC_LOAD 0x10000000
243 #define LOAD_S9(x) ((((x) & 0x0ff) << 16) | (((x) & 0x100) << 7))
244 #define LOAD_C(x) ((x) & 0x03f)
266 #define OPC_STORE 0x18000000
269 #define STORE_S9(x) ((((x) & 0x0ff) << 16) | (((x) & 0x100) << 7))
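LOAD_S9 and STORE_S9 are the same macro; a short check (illustrative harness) of the s9 field split:

    #include <assert.h>

    /* Copied from above: the 9-bit signed offset is split, bits 7:0
     * go to instruction bits 23:16 and the sign bit (bit 8) to bit 15. */
    #define STORE_S9(x) ((((x) & 0x0ff) << 16) | (((x) & 0x100) << 7))

    int main(void)
    {
            assert(STORE_S9(-1 & 0x1ff) == 0x00ff8000);  /* offset -1 */
            assert(STORE_S9(4)          == 0x00040000);  /* offset  4 */
            return 0;
    }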
279 * 0010_0bbb 0i00_0000 fBBB_cccc ccaa_aaaa
288 #define OPC_ADD 0x20000000
300 * 0010_0bbb 0i00_0001 0BBB_cccc ccaa_aaaa
308 #define OPC_ADC 0x20010000
316 * 0010_0bbb 0i00_0010 fBBB_cccc ccaa_aaaa
325 #define OPC_SUB 0x20020000
344 #define OPC_SBC 0x20030000
356 #define OPC_CMP 0x20cc8000
361 * 0010_0bbb 0100_1110 0BBB_0000 00aa_aaaa
366 #define OPC_NEG 0x204e0000
373 * 0010_0bbb 0001_1010 0BBB_cccc ccaa_aaaa
379 #define OPC_MPY 0x201a0000
387 * 0010_1bbb 0001_1001 0BBB_cccc ccaa_aaaa
393 #define OPC_MPYDU 0x28190000
399 * 0010_1bbb 0000_0101 0BBB_cccc ccaa_aaaa
405 #define OPC_DIVU 0x28050000
411 * 0010_1bbb 0000_0100 0BBB_cccc ccaa_aaaa
417 #define OPC_DIVS 0x28040000
423 * 0010_1bbb 0000_1001 0BBB_cccc ccaa_aaaa
429 #define OPC_REMU 0x28090000
435 * 0010_1bbb 0000_1000 0BBB_cccc ccaa_aaaa
441 #define OPC_REMS 0x28080000
455 #define OPC_AND 0x20040000
470 #define OPC_TST 0x20cb8000
475 * 0010_0bbb 0000_0101 0BBB_cccc ccaa_aaaa
481 #define OPC_OR 0x20050000
487 * 0010_0bbb 0000_0111 0BBB_cccc ccaa_aaaa
493 #define OPC_XOR 0x20070000
499 * 0010_0bbb 0010_1111 0BBB_cccc cc00_1010
504 #define OPC_NOT 0x202f000a
514 #define OPC_BTSTU6 0x20518000
520 * 0010_1bbb 0i00_0000 0BBB_cccc ccaa_aaaa
527 #define OPC_ASL 0x28000000
535 * 0010_1bbb 0i00_0010 0BBB_cccc ccaa_aaaa
543 #define OPC_ASR 0x28020000
551 * 0010_1bbb 0i00_0001 0BBB_cccc ccaa_aaaa
559 #define OPC_LSR 0x28010000
567 * 0010_1bbb 0010_1111 0BBB_cccc cc00_1001
572 #define OPC_SWAPE 0x282f0009
582 #define OPC_JMP 0x20e00000
594 #define OPC_JL 0x20220000
598 * that is word aligned: (PC & 0xffff_fffc) + s21
607 * it should be a multiple of 2. Hence, there is an implied '0' bit at its
610 #define OPC_BCC 0x00000000
611 #define BCC_S21(d) ((((d) & 0x7fe) << 16) | (((d) & 0x1ff800) >> 5))
615 * that is word aligned: (PC & 0xffff_fffc) + s25
623 * it should be a multiple of 2. Hence, there is an implied '0' bit at its
626 #define OPC_B 0x00010000
627 #define B_S25(d) ((((d) & 0x1e00000) >> 21) | BCC_S21(d))
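A sketch (illustrative harness, not kernel code) of the branch displacement fields. Displacements are halfword-aligned, so bit 0 is implied and never stored; d[10:1] lands in instruction bits 26:17, d[20:11] in bits 15:6, and, for s25, d[24:21] in bits 3:0.

    #include <assert.h>

    /* Copied from above. */
    #define BCC_S21(d) ((((d) & 0x7fe) << 16) | (((d) & 0x1ff800) >> 5))
    #define B_S25(d)   ((((d) & 0x1e00000) >> 21) | BCC_S21(d))

    int main(void)
    {
            assert(BCC_S21(8)     == 0x00080000);   /* short forward hop */
            assert(B_S25(1 << 21) == 0x00000001);   /* far bits -> LSBs  */
            return 0;
    }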
637 emit_2_bytes(buf + 2, bytes & 0xffff); in emit_4_bytes()
1308 return arc_movi_r(buf, REG_HI(rd), 0); in zext()
1310 return 0; in zext()
1315 u8 len = 0; in mov_r32()
1343 u8 len = 0; in mov_r64()
1361 return 0; in mov_r64()
1369 len += arc_movi_r(BUF(buf, len), REG_HI(rd), 0); in mov_r64()
1377 u8 len = 0; in mov_r64_i32()
1383 if (imm >= 0) in mov_r64_i32()
1384 len += arc_movi_r(BUF(buf, len), REG_HI(reg), 0); in mov_r64_i32()
1398 * LD R <- 0x0000_0001_0000_0001
1408 * LD R <- 0x0000_0000_1234_5678
1412 * mov R_lo, 0x12345678 # this is an 8-byte instruction
1413 * mov R_hi, 0 # still 4 bytes
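The comment above describes a size optimisation; here is a hedged sketch of the predicate behind it (the helper name is illustrative, not the JIT's):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* If the high word of a 64-bit constant is merely the sign
     * extension of its low word, two short "mov"s (8 + 4 bytes in the
     * example above) beat materialising all 64 bits. */
    static bool fits_mov_pair(int64_t imm)
    {
            return imm == (int64_t)(int32_t)imm;
    }

    int main(void)
    {
            assert( fits_mov_pair(0x12345678));     /* the example above */
            assert(!fits_mov_pair(0x100000001LL));  /* needs both words  */
            return 0;
    }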
1440 * {ld,st} r, [r10, 0]
1445 u8 len = 0; in adjust_mem_access()
1453 *off = 0; in adjust_mem_access()
1488 * mov r21, {0,-1}
1503 imm = (imm >= 0 ? 0 : -1); in store_i()
1523 u8 len = 0; in push_r64()
1563 len += arc_movi_r(BUF(buf, len), REG_HI(rd), 0); in load_r()
1569 * ld rx, [rb, off+0] in load_r()
1577 * ld rx, [rb, off+0] in load_r()
1623 len += arc_adci_r(BUF(buf, len), REG_HI(rd), 0); in add_r64_i32()
1754 u8 len = 0; in mul_r64_i32()
1757 return 0; in mul_r64_i32()
1760 if (imm < 0) in mul_r64_i32()
1768 if (imm < 0) in mul_r64_i32()
1784 if (imm == 0) in div_r32_i32()
1785 return 0; in div_r32_i32()
1803 if (imm == 0) in mod_r32_i32()
1804 return 0; in mod_r32_i32()
1922 * lo = 0
1931 * asl B_lo, B_lo, t0 # version, when C_lo=0, t1 becomes B_lo while
1932 * asl B_hi, B_hi, t0 # it should be 0. The "not" approach instead,
1934 * btst t0, 5 # setting it to 0 when C_lo=0.
1936 * mov.ne B_lo, 0
1966 len += arc_movu_cc_r(BUF(buf, len), CC_unequal, B_lo, 0); in lsh_r64()
1979 * lo = 0
1987 u8 len = 0; in lsh_r64_i32()
1989 if (n == 0) { in lsh_r64_i32()
1990 return 0; in lsh_r64_i32()
1998 len += arc_movi_r(BUF(buf, len), B_lo, 0); in lsh_r64_i32()
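A plain-C model (illustrative, not kernel code) of the 64-bit left shift that the lsh_r64/lsh_r64_i32 fragments above implement with 32-bit halves: bit 5 of the amount ("btst t0, 5") decides whether the low word migrates wholesale into the high word, with lo then forced to 0 ("mov.ne B_lo, 0").

    #include <assert.h>
    #include <stdint.h>

    static uint64_t lsh64(uint32_t lo, uint32_t hi, unsigned n)
    {
            n &= 63;
            if (n & 32) {                   /* shift amount >= 32   */
                    hi = lo << (n & 31);    /* lo becomes the new hi */
                    lo = 0;                 /* "mov.ne B_lo, 0"      */
            } else if (n) {
                    hi = (hi << n) | (lo >> (32 - n));
                    lo <<= n;
            }
            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            assert(lsh64(0x80000001u, 0, 1) == 0x100000002ull);
            assert(lsh64(1, 0, 33)          == 1ull << 33);
            assert(lsh64(3, 7, 0)           == 0x700000003ull);
            return 0;
    }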
2028 * hi = 0
2041 * mov.ne B_hi, 0
2061 len += arc_movu_cc_r(BUF(buf, len), CC_unequal, B_hi, 0); in rsh_r64()
2068 * to_lo = B_hi << 32-n # extract lower n bits, right-padded with 32-n 0s
2074 * hi = 0
2082 u8 len = 0; in rsh_r64_i32()
2084 if (n == 0) { in rsh_r64_i32()
2085 return 0; in rsh_r64_i32()
2093 len += arc_movi_r(BUF(buf, len), B_hi, 0); in rsh_r64_i32()
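The logical-right-shift counterpart, modelled the same way (illustrative, not kernel code): for n >= 32, lo receives hi >> (n-32) and hi becomes 0.

    #include <assert.h>
    #include <stdint.h>

    static uint64_t rsh64(uint32_t lo, uint32_t hi, unsigned n)
    {
            n &= 63;
            if (n & 32) {
                    lo = hi >> (n & 31);    /* hi's remains go to lo */
                    hi = 0;
            } else if (n) {
                    lo = (lo >> n) | (hi << (32 - n));
                    hi >>= n;
            }
            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            assert(rsh64(0, 0x80000000u, 63) == 1);
            assert(rsh64(0x00000002u, 1, 1)  == 0x80000001u);
            return 0;
    }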
2136 * asr t0, B_hi, 31 # now, t0 = 0 or -1 based on B_hi's sign
2166 * to_lo = hi << 32-n # extract lower n bits, right-padded with 32-n 0s
2172 * hi = (lo[msb] ? -1 : 0)
2180 u8 len = 0; in arsh_r64_i32()
2182 if (n == 0) { in arsh_r64_i32()
2183 return 0; in arsh_r64_i32()
2193 len += arc_movu_cc_r(BUF(buf, len), CC_equal, B_hi, 0); in arsh_r64_i32()
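The arithmetic variant, again as a plain-C model (illustrative, not kernel code): "asr t0, B_hi, 31" yields 0 or -1 from the sign, which becomes the new high word once the shift amount reaches 32.

    #include <assert.h>
    #include <stdint.h>

    static uint64_t arsh64(uint32_t lo, uint32_t hi, unsigned n)
    {
            n &= 63;
            if (n & 32) {
                    lo = (uint32_t)((int32_t)hi >> (n & 31));
                    hi = (int32_t)hi < 0 ? UINT32_MAX : 0;  /* sign fill */
            } else if (n) {
                    lo = (lo >> n) | (hi << (32 - n));
                    hi = (uint32_t)((int32_t)hi >> n);
            }
            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            assert(arsh64(0, 0x80000000u, 63) == UINT64_MAX);
            assert(arsh64(0, 0x40000000u, 62) == 1);
            return 0;
    }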
2202 u8 len = 0; in gen_swap()
2247 len = arc_and_i(buf, REG_LO(rd), 0xffff); in gen_swap()
2338 * 180: mov r10,0 | 180: mov r10,0x700578d8
2340 * 188: add.f r16,r16,0x1 | 18c: adc r17,r17,0
2341 * 18c: adc r17,r17,0 |
2343 * In the above example, the change from "r10 <- 0" to "r10 <- 0x700578d8"
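A generic sketch (not this JIT's exact driver; jit_pass() is a toy stand-in) of why such between-pass diffs matter: an instruction's length can change once a target address becomes known, so emission is repeated until the total size stops changing.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Pretend the size grows by 4 once an address resolves, then
     * stabilises, like the mov in the example above. */
    static size_t jit_pass(void)
    {
            static const size_t size_per_pass[] = { 400, 404, 404 };
            static unsigned pass;

            return size_per_pass[pass < 2 ? pass++ : 2];
    }

    static bool jit_converge(unsigned max_passes)
    {
            size_t prev = 0;

            while (max_passes--) {
                    size_t cur = jit_pass();

                    if (cur == prev)        /* sizes stable: done */
                            return true;
                    prev = cur;
            }
            return false;
    }

    int main(void)
    {
            assert(jit_converge(8));
            return 0;
    }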
2368 u32 usage = 0; in mask_for_used_regs()
2392 usage |= is_call ? BIT(ARC_R_BLINK) : 0; in mask_for_used_regs()
2399 * push r[0-n] # if r[i] is marked as clobbered
2401 * mov fp, sp # if frame_size > 0 (clobbers fp)
2406 u8 len = 0; in arc_prologue()
2407 u32 gp_regs = 0; in arc_prologue()
2422 if ((usage & BIT(ARC_R_FP)) || frame_size > 0) in arc_prologue()
2425 if (frame_size > 0) in arc_prologue()
2429 if ((usage & BIT(ARC_R_FP)) && frame_size == 0) { in arc_prologue()
2439 * mov sp, fp # if frame_size > 0
2441 * pop r[n-0] # if r[i] is marked as clobbered
2447 * "fp being marked as clobbered" and "frame_size > 0" are the two sides of
2452 u32 len = 0; in arc_epilogue()
2453 u32 gp_regs = 0; in arc_epilogue()
2456 if ((usage & BIT(ARC_R_FP)) && frame_size == 0) { in arc_epilogue()
2462 if (frame_size > 0) in arc_epilogue()
2466 if ((usage & BIT(ARC_R_FP)) || frame_size > 0) in arc_epilogue()
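An illustrative walk (not kernel code) of a clobber mask like the one mask_for_used_regs() builds for the prologue/epilogue above; emit_push() is a hypothetical printer standing in for the real instruction emitters.

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static void emit_push(unsigned reg)
    {
            printf("push r%u\n", reg);
    }

    /* Push every register whose bit is set in the usage mask. */
    static void spill_clobbered(uint32_t usage)
    {
            for (unsigned r = 0; r < 32; r++)
                    if (usage & BIT(r))
                            emit_push(r);
    }

    int main(void)
    {
            spill_clobbered(BIT(13) | BIT(14) | BIT(15));
            return 0;
    }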
2503 * 0x1000: cmp r3, r1 # 0x1000 is the JIT address for "BPF_JGE ..." insn
2504 * 0x1004: bhi @target # first jump (branch higher)
2505 * 0x1008: blo @end # second jump acting as a skip (end is 0x1014)
2506 * 0x100C: cmp r2, r0 # the lower 32 bits are evaluated
2507 * 0x1010: bhs @target # third jump (branch higher or same)
2508 * 0x1014: ...
2517 * 0x1000 + 4 - @target
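A C model (illustrative, not kernel code) of the three-branch scheme above for an unsigned 64-bit >=: compare the high words first; only on equality do the low words decide, exactly what the "bhi / blo / bhs" triple implements.

    #include <assert.h>
    #include <stdint.h>

    static int jge64(uint32_t rd_hi, uint32_t rd_lo,
                     uint32_t rs_hi, uint32_t rs_lo)
    {
            if (rd_hi > rs_hi)              /* "bhi @target" */
                    return 1;
            if (rd_hi < rs_hi)              /* "blo @end"    */
                    return 0;
            return rd_lo >= rs_lo;          /* "bhs @target" */
    }

    int main(void)
    {
            assert( jge64(1, 0, 0, 0xffffffffu));
            assert(!jge64(0, 5, 1, 0));
            assert( jge64(7, 9, 7, 9));
            return 0;
    }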
2680 u8 len = 0; in gen_j_eq_64()
2697 u8 len = 0; in gen_jset_64()
2719 for (i = 0; i < JCC64_NR_OF_JMPS; i++) { in check_jcc_64()
2797 * (0x0000_0000,0x8000_0000) s>= (0x0000_0000,0x0000_0000)
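The example above, spelled out (illustrative harness): with equal high words the sign of a signed 64-bit comparison is already settled, so the low words must be compared as unsigned values.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t a = 0x0000000080000000LL;       /* low word's msb set */
            int64_t b = 0;

            assert(a >= b);                      /* 64-bit signed view   */
            assert((uint32_t)a >= (uint32_t)b);  /* unsigned low words   */
            assert(!((int32_t)a >= (int32_t)b)); /* signed low words lie */
            return 0;
    }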
2810 u8 len = 0; in gen_jcc_64()
2817 len += arc_bcc(BUF(buf, len), cc[0], disp); in gen_jcc_64()
2841 u8 len = 0; in gen_jmp_64()
2916 addendum = (cond == ARC_CC_AL) ? 0 : INSN_len_normal; in check_jmp_32()
2942 u8 len = 0; in gen_jmp_32()
2954 return 0; in gen_jmp_32()
2990 u8 len = 0; in gen_func_call()