Lines Matching 0b10
290 def MVE_v4i32 : MVEVectorVTInfo<v4i32, v2i64, v4i1, v2i1, 0b10, "i", ?>;
298 def MVE_v4s32 : MVEVectorVTInfo<v4i32, v2i64, v4i1, v2i1, 0b10, "s", 0b0>;
302 def MVE_v4u32 : MVEVectorVTInfo<v4i32, v2i64, v4i1, v2i1, 0b10, "u", 0b1>;
307 def MVE_v4f32 : MVEVectorVTInfo<v4f32, v2f64, v4i1, v2i1, 0b10, "f", ?>;
475 def MVE_SRSHR : MVE_ScalarShiftSRegImm<"srshr", 0b10>;
496 def MVE_SQRSHR : MVE_ScalarShiftSRegReg<"sqrshr", 0b10>;
571 def MVE_ASRLi : MVE_ScalarShiftDRegImm<"asrl", 0b10, ?, [(set tGPREven:$RdaLo, tGPROdd:$RdaHi,
586 def MVE_SRSHRL : MVE_ScalarShiftDRegImm<"srshrl", 0b10, 0b1>;
761 suffix, "$RdaLo, $RdaHi, $Qm", cstr, 0b10, pattern> {
768 let Inst{19-18} = 0b10;
833 "$RdaDest = $RdaSrc", !if(sz, 0b01, 0b10), pattern> {
1397 defm MVE_VMLSLDAV : MVE_VMLSLDAV_multi<"vmlsldav", "s32", 0b1, 0b0, 0b10>;
1398 defm MVE_VRMLSLDAVH : MVE_VMLSLDAV_multi<"vrmlsldavh", "s32", 0b0, 0b1, 0b10>;
1551 def MVE_VREV64_32 : MVE_VREV<"vrev64", "32", 0b10, 0b00, 0b11, "@earlyclobber $Qd">;
1553 def MVE_VREV32_8 : MVE_VREV<"vrev32", "8", 0b00, 0b01, 0b10>;
1554 def MVE_VREV32_16 : MVE_VREV<"vrev32", "16", 0b01, 0b01, 0b10>;
1556 def MVE_VREV16_8 : MVE_VREV<"vrev16", "8", 0b00, 0b10, 0b01>;
1632 def MVE_VORR : MVE_bit_ops<"vorr", 0b10, 0b0>;
1802 let VecSize = 0b10;
2383 let Inst{21-20} = 0b10;
2395 def MVE_VDUP32 : MVE_VDUP<"32", 0b0, 0b0, 0b10>;
2618 def MVE_VMOVimmi32 : MVE_mod_imm<"vmov", "i32", {?,?,?,?}, 0b0, (ins nImmVMOVI32:$imm), 0b10> {
2622 def MVE_VMOVimmf32 : MVE_mod_imm<"vmov", "f32", {1,1,1,1}, 0b0, (ins nImmVMOVF32:$imm), 0b10>;
2628 def MVE_VMVNimmi32 : MVE_mod_imm<"vmvn", "i32", {?,?,?,?}, 0b1, (ins nImmVMOVI32:$imm), 0b10> {
2724 vpred_n, "$RdmDest = $RdmSrc,$Qd = $QdSrc", 0b10> {
2853 : MVE_VSHLL_imm<iname, suffix, U, th, mve_shift_imm1_15, 0b10, pattern> {
2871 iname, suffix, ops, vpred_r, "", !if(size, 0b10, 0b01), pattern> {
2962 def MVE_VRSHRNi32bh : MVE_VxSHRN<"vrshrnb", "i32", 0b0, 0b1, shr_imm16, 0b10> {
2965 def MVE_VRSHRNi32th : MVE_VxSHRN<"vrshrnt", "i32", 0b1, 0b1, shr_imm16, 0b10> {
2975 def MVE_VSHRNi32bh : MVE_VxSHRN<"vshrnb", "i32", 0b0, 0b0, shr_imm16, 0b10> {
2978 def MVE_VSHRNi32th : MVE_VxSHRN<"vshrnt", "i32", 0b1, 0b0, shr_imm16, 0b10> {
3008 "vqrshrunb", "s32", 0b1, 0b0, shr_imm16, 0b10> {
3012 "vqrshrunt", "s32", 0b1, 0b1, shr_imm16, 0b10> {
3025 "vqshrunb", "s32", 0b0, 0b0, shr_imm16, 0b10> {
3029 "vqshrunt", "s32", 0b0, 0b1, shr_imm16, 0b10> {
3058 def s32 : MVE_VxQRSHRN<iname, "s32", bit_0, bit_12, shr_imm16, 0b10> {
3062 def u32 : MVE_VxQRSHRN<iname, "u32", bit_0, bit_12, shr_imm16, 0b10> {
3243 let Inst{10-9} = 0b10;
3258 def MVE_VSRIimm32 : MVE_VSxI_imm<"vsri", "32", 0b0, shr_imm32, 0b10> {
3270 def MVE_VSLIimm32 : MVE_VSxI_imm<"vsli", "32", 0b1, imm0_31, 0b10> {
3474 def MVE_VSHR_imms32 : MVE_VSHR_imm<"s32", (ins shr_imm32:$imm), 0b10> {
3479 def MVE_VSHR_immu32 : MVE_VSHR_imm<"u32", (ins shr_imm32:$imm), 0b10> {
3504 def MVE_VSHL_immi32 : MVE_VSHL_imm<"i32", (ins imm0_31:$imm), 0b10> {
3570 let Inst{17-16} = 0b10;
3894 "$Qd, $Qm, $imm6", vpred_r, "", !if(fsi, 0b10, 0b01), []> {
4019 defm p : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "p", 0b10>;
4234 : MVE_VCMPqq<suffix, size, 0b11, pred_basic_fp, !if(size, 0b01, 0b10)> {
4260 def MVE_VCMPi32 : MVE_VCMPqqi<"i32", 0b10>;
4264 def MVE_VCMPu32 : MVE_VCMPqqu<"u32", 0b10>;
4268 def MVE_VCMPs32 : MVE_VCMPqqs<"s32", 0b10>;
4299 : MVE_VCMPqr<suffix, size, 0b11, pred_basic_fp, !if(size, 0b01, 0b10)> {
4325 def MVE_VCMPi32r : MVE_VCMPqri<"i32", 0b10>;
4329 def MVE_VCMPu32r : MVE_VCMPqru<"u32", 0b10>;
4333 def MVE_VCMPs32r : MVE_VCMPqrs<"s32", 0b10>;
4701 int_arm_mve_mull_int_predicated, 0b0, 0b10>;
4703 int_arm_mve_mull_int_predicated, 0b1, 0b10>;
4716 int_arm_mve_mull_int_predicated, 0b0, 0b10>;
4718 int_arm_mve_mull_int_predicated, 0b1, 0b10>;
4731 int_arm_mve_mull_poly_predicated, 0b0, 0b10>;
4733 int_arm_mve_mull_poly_predicated, 0b1, 0b10>;
4848 vpred_n, "$Qd = $Qd_src", !if(size, 0b10, 0b01), pattern> {
4993 vpred, cstr, 0b10, []> {
5104 "$Qd, $Qn, $Qm", vpred_r, "", 0b10, pattern> {
5131 vpred_r, cstr, !if(size, 0b10, 0b01), pattern> {
5315 : MVE_qDest_rSrc<iname, suffix, cstr, !if(size, 0b10, 0b01), pattern> {
5505 def MVE_VBRSR32 : MVE_VBRSR<"vbrsr", "32", 0b10>;
5786 def MVE_VIDUPu32 : MVE_VxDUP<"vidup", "u32", 0b10, 0b0, v4i32, ARMvidup>;
5790 def MVE_VDDUPu32 : MVE_VxDUP<"vddup", "u32", 0b10, 0b1, v4i32, null_frag>;
5822 def MVE_VIWDUPu32 : MVE_VxWDUP<"viwdup", "u32", 0b10, 0b0>;
5826 def MVE_VDWDUPu32 : MVE_VxWDUP<"vdwdup", "u32", 0b10, 0b1>;
5834 let Inst{28-27} = 0b10;
5888 let VecSize = 0b10;
6056 MVE_vldst24_lanesize<32, 0b10>] in
6157 def MVE_memW: MVE_memsz<0b10, 2, AddrModeT2_i7s4, "w", ["", "u", "s", "f"]>;
6303 defm MVE_VLDRBS32: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memB, "vldrb", "s32", 0, 0b10>;
6305 defm MVE_VLDRBU32: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memB, "vldrb", "u32", 1, 0b10>;
6306 defm MVE_VLDRHS32: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memH, "vldrh", "s32", 0, 0b10>;
6307 defm MVE_VLDRHU32: MVE_VLDRSTR_cw_m<MVE_ld, MVE_memH, "vldrh", "u32", 1, 0b10>;
6314 defm MVE_VSTRB32: MVE_VLDRSTR_cw_m<MVE_st, MVE_memB, "vstrb", "32", 0, 0b10>;
6315 defm MVE_VSTRH32: MVE_VLDRSTR_cw_m<MVE_st, MVE_memH, "vstrh", "32", 0, 0b10>;
6624 def MVE_VPTv4i32 : MVE_VPTt1i<"i32", 0b10>;
6635 def MVE_VPTv4u32 : MVE_VPTt1u<"u32", 0b10>;
6645 def MVE_VPTv4s32 : MVE_VPTt1s<"s32", 0b10>;
6668 def MVE_VPTv4i32r : MVE_VPTt2i<"i32", 0b10>;
6679 def MVE_VPTv4u32r : MVE_VPTt2u<"u32", 0b10>;
6689 def MVE_VPTv4s32r : MVE_VPTt2s<"s32", 0b10>;
6696 "", !if(size, 0b01, 0b10), pattern> {
6959 def MVE_DLSTP_32 : MVE_DLSTP<"dlstp.32", 0b10>;
6964 def MVE_WLSTP_32 : MVE_WLSTP<"wlstp.32", 0b10>;
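For context, the recurring 0b10 in the matches above is in most cases the two-bit MVE element-size code selecting 32-bit lanes (0b00 = 8-bit, 0b01 = 16-bit, 0b10 = 32-bit), passed down through class parameters and wired into the instruction encoding. A minimal TableGen sketch of that pattern, using hypothetical class and def names rather than anything from the file above:

// Hypothetical illustration only; real MVE classes carry many more
// parameters and place the size field at positions that vary per
// instruction group (e.g. "let Inst{21-20} = 0b10" in the matches above).
class ExampleMVESized<string suffix, bits<2> size> {
  bits<32> Inst;
  string Suffix = suffix;
  let Inst{21-20} = size; // element-size field: 0b10 selects 32-bit lanes
}
def Example_u32 : ExampleMVESized<"u32", 0b10>;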