/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/

HexagonDepMapAsm2Intrin.td
    14  def: Pat<(int_hexagon_A2_abs IntRegs:$src1),
    15  (A2_abs IntRegs:$src1)>, Requires<[HasV5]>;
    16  def: Pat<(int_hexagon_A2_absp DoubleRegs:$src1),
    17  (A2_absp DoubleRegs:$src1)>, Requires<[HasV5]>;
    18  def: Pat<(int_hexagon_A2_abssat IntRegs:$src1),
    19  (A2_abssat IntRegs:$src1)>, Requires<[HasV5]>;
    20  def: Pat<(int_hexagon_A2_add IntRegs:$src1, IntRegs:$src2),
    21  (A2_add IntRegs:$src1, IntRegs:$src2)>, Requires<[HasV5]>;
    22  def: Pat<(int_hexagon_A2_addh_h16_hh IntRegs:$src1, IntRegs:$src2),
    23  (A2_addh_h16_hh IntRegs:$src1, IntRegs:$src2)>, Requires<[HasV5]>;
    [all …]

HexagonMapAsm2IntrinV62.gen.td
    10  def: Pat<(IntID HvxVR:$src1, IntRegs:$src2),
    11  (MI HvxVR:$src1, IntRegs:$src2)>;
    12  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, IntRegs:$src2),
    13  (MI HvxVR:$src1, IntRegs:$src2)>;
    17  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
    18  (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
    19  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
    21  (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
    25  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2),
    26  (MI HvxVR:$src1, HvxVR:$src2)>;
    [all …]

HexagonIntrinsicsV60.td
    15  def : Pat < (v16i32 (int_hexagon_V6_lo (v32i32 HvxWR:$src1))),
    16  (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_lo)) >;
    18  def : Pat < (v16i32 (int_hexagon_V6_hi (v32i32 HvxWR:$src1))),
    19  (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_hi)) >;
    21  def : Pat < (v32i32 (int_hexagon_V6_lo_128B (v64i32 HvxWR:$src1))),
    22  (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_lo)) >;
    24  def : Pat < (v32i32 (int_hexagon_V6_hi_128B (v64i32 HvxWR:$src1))),
    25  (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_hi)) >;
    28  def : Pat <(v64i1 (bitconvert (v16i32 HvxVR:$src1))),
    29  (v64i1 (V6_vandvrt(v16i32 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;
    [all …]

HexagonIntrinsics.td
   127  def : Pat <(int_hexagon_C2_cmpgei I32:$src1, s32_0ImmPred_timm:$src2),
   128  (C2_tfrpr (C2_cmpgti I32:$src1, (SDEC1 s32_0ImmPred:$src2)))>;
   130  def : Pat <(int_hexagon_C2_cmpgeui I32:$src1, u32_0ImmPred_timm:$src2),
   131  (C2_tfrpr (C2_cmpgtui I32:$src1, (UDEC1 u32_0ImmPred:$src2)))>;
   135  def : Pat <(int_hexagon_C2_cmplt I32:$src1, I32:$src2),
   136  (C2_tfrpr (C2_cmpgt I32:$src2, I32:$src1))>;
   137  def : Pat <(int_hexagon_C2_cmpltu I32:$src1, I32:$src2),
   138  (C2_tfrpr (C2_cmpgtu I32:$src2, I32:$src1))>;
   145  : Pat <(IntID I32:$src1, I32:$src2, u4_0ImmPred_timm:$src3, u5_0ImmPred_timm:$src4),
   146  (OutputInst I32:$src1, I32:$src2, u4_0ImmPred:$src3,
    [all …]
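
Note: the C2_cmpgei/C2_cmpgeui patterns above have no native "greater-or-equal" compare to map to, so they decrement the immediate (SDEC1/UDEC1) and reuse the strict "greater-than" compares, while the cmplt/cmpltu patterns simply swap the operands of ">". A minimal C sketch of the immediate identity being relied on (plain C, helper names are illustrative, not from the file):

    #include <assert.h>
    #include <stdint.h>

    /* Signed: x >= imm  <=>  x > imm - 1, as long as imm - 1 does not
     * underflow the immediate range. */
    static int cmpge_via_cmpgt(int32_t x, int32_t imm)
    {
        return x > (imm - 1);
    }

    /* Unsigned: x >= imm  <=>  x > imm - 1, valid for imm > 0. */
    static int cmpgeu_via_cmpgtu(uint32_t x, uint32_t imm)
    {
        return x > (imm - 1u);
    }

    int main(void)
    {
        assert(cmpge_via_cmpgt(5, 5) == 1);
        assert(cmpge_via_cmpgt(4, 5) == 0);
        assert(cmpgeu_via_cmpgtu(7u, 1u) == 1);
        return 0;
    }

That the immediate predicates (s32_0ImmPred/u32_0ImmPred) keep the decrement in range is assumed here, not shown in the excerpt.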

/freebsd/contrib/sendmail/libsm/

t-strl.c
    31  char src1[N][SIZE], dst1[SIZE], dst2[SIZE];
    92  (void) sm_strlcpy(src1[k], "abcdef", sizeof src1);
    95  one = sm_strlcpyn(dst1, sizeof dst1, 3, src1[0], "/", src1[1]);
    96  two = sm_snprintf(dst2, sizeof dst2, "%s/%s", src1[0], src1[1]);
    99  one = sm_strlcpyn(dst1, 10, 3, src1[0], "/", src1[1]);
   100  two = sm_snprintf(dst2, 10, "%s/%s", src1[0], src1[1]);
   103  one = sm_strlcpyn(dst1, 5, 3, src1[0], "/", src1[1]);
   104  two = sm_snprintf(dst2, 5, "%s/%s", src1[0], src1[1]);
   107  one = sm_strlcpyn(dst1, 0, 3, src1[0], "/", src1[1]);
   108  two = sm_snprintf(dst2, 0, "%s/%s", src1[0], src1[1]);
    [all …]
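
The test above builds the same "a/b" string with sm_strlcpyn() and sm_snprintf() at shrinking buffer sizes (full, 10, 5, 0) and expects the two to agree. A rough C sketch of the truncation contract being exercised, written against the standard snprintf only (libsm's functions are not used here; that sm_strlcpyn reports the same would-be length is an assumption taken from the test, not demonstrated):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    /* The contract: the return value is the length the full "a/b" string
     * would have had, regardless of buffer size, and the output is
     * NUL-terminated whenever the size is non-zero. */
    static void check_truncation(const char *a, const char *b, size_t size)
    {
        char buf[64];
        int want = snprintf(NULL, 0, "%s/%s", a, b);   /* full length */
        int got = snprintf(buf, size < sizeof buf ? size : sizeof buf,
                           "%s/%s", a, b);

        assert(got == want);            /* same length reported */
        if (size > 0)
            assert(strlen(buf) < size); /* always terminated, never overrun */
    }

    int main(void)
    {
        check_truncation("abcdef", "abcdef", 64);  /* no truncation */
        check_truncation("abcdef", "abcdef", 10);  /* mild truncation */
        check_truncation("abcdef", "abcdef", 5);   /* heavy truncation */
        check_truncation("abcdef", "abcdef", 0);   /* size 0: nothing written */
        return 0;
    }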

/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

X86InstrXOP.td
    97  (ins VR128:$src1, VR128:$src2),
    98  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
   100  (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
   103  (ins VR128:$src1, i128mem:$src2),
   104  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
   106  (vt128 (OpNode (vt128 VR128:$src1),
   110  (ins i128mem:$src1, VR128:$src2),
   111  !strconcat(OpcodeStr, "\t{$src2, $src1,
    [all...]

X86InstrShiftRotate.td
   244  def : Pat<(rotl GR8:$src1, (i8 7)), (ROR8r1 GR8:$src1)>;
   245  def : Pat<(rotl GR16:$src1, (i8 15)), (ROR16r1 GR16:$src1)>;
   246  def : Pat<(rotl GR32:$src1, (i8 31)), (ROR32r1 GR32:$src1)>;
   247  def : Pat<(rotl GR64:$src1, (i8 63)), (ROR64r1 GR64:$src1)>;
   248  def : Pat<(rotr GR8:$src1, (i8 7)), (ROL8r1 GR8:$src1)>;
    [all...]
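
Those Pat<> lines fold a rotate-left by width-1 into a rotate-right by 1 (and vice versa), so the short ROR/ROL-by-1 encoding can be used. A small C sketch of the identity (helper names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned n)  /* n in 1..31 */
    {
        return (x << n) | (x >> (32 - n));
    }

    static uint32_t rotr32(uint32_t x, unsigned n)  /* n in 1..31 */
    {
        return (x >> n) | (x << (32 - n));
    }

    int main(void)
    {
        uint32_t x = 0x80000001u;
        /* A rotate left by 31 is the same operation as a rotate right by 1,
         * and symmetrically for the opposite direction. */
        assert(rotl32(x, 31) == rotr32(x, 1));
        assert(rotr32(x, 31) == rotl32(x, 1));
        return 0;
    }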

X86InstrCMovSetCC.td
    19  (ins t.RegClass:$src1, t.RegClass:$src2, ccode:$cond),
    21  [(set t.RegClass:$dst, (X86cmov t.RegClass:$src1,
    25  (ins t.RegClass:$src1, t.MemOperand:$src2, ccode:$cond),
    27  [(set t.RegClass:$dst, (X86cmov t.RegClass:$src1,
    35  (ins t.RegClass:$src1, ccode:$cond),
    38  (X86cmov 0, t.RegClass:$src1, timm:$cond, EFLAGS))]>, UseEFLAGS, NF;
    40  (ins t.RegClass:$src1, ccode:$cond),
    46  (ins t.RegClass:$src1, t.RegClass:$src2, ccode:$cond),
    52  (ins t.MemOperand:$src1, ccode:$cond),
    56  (ins t.RegClass:$src1, t.MemOperand:$src2, ccode:$cond),
    [all …]
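
In these CMOV patterns $src1 is the tied, preserved operand and $src2 the conditionally taken one, so the node behaves like a per-register select; the memory forms differ only in where $src2 comes from. A one-line C sketch of that semantics (illustrative, not the backend's representation):

    #include <stdint.h>

    /* The destination starts out as src1 (the tied operand); if the
     * condition computed into EFLAGS holds it becomes src2, otherwise it
     * keeps src1. */
    static int64_t cmov(int cond, int64_t src1, int64_t src2)
    {
        return cond ? src2 : src1;
    }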

X86InstrAMX.td
    65  def PTILELOADDV : PseudoI<(outs TILE:$dst), (ins GR16:$src1,
    69  def PTILELOADDT1V : PseudoI<(outs TILE:$dst), (ins GR16:$src1,
    73  def PTILESTOREDV : PseudoI<(outs), (ins GR16:$src1,
    78  def PTILEZEROV : PseudoI<(outs TILE:$dst), (ins GR16:$src1, GR16:$src2),
    80  GR16:$src1, GR16:$src2))]>;
    86  def PTILELOADD : PseudoI<(outs), (ins u8imm:$src1, sibmem:$src2), []>;
    88  def PTILELOADDT1 : PseudoI<(outs), (ins u8imm:$src1,
   100  let Constraints = "$src1 = $dst" in {
   102  (ins TILE:$src1, TILE:$src2, TILE:$src3),
   106  (ins TILE:$src1, TILE:$src2, TILE:$src3),
    [all …]

X86InstrSSE.td
    26  def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
    29  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
    30  [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], d>,
    33  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
    36  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
    37  [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], d>,
    49  def rr_Int : SI_Int<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
    52  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
    53  [(set RC:$dst, (VT (OpNode RC:$src1, RC:$src2)))], d>,
    56  def rm_Int : SI_Int<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
    [all …]

X86InstrConditionalCompare.td
    14  : ITy<o, f, t, (outs), (ins op1:$src1, op2:$src2, cflags:$dcf, ccode:$cond),
    15  … m#"${cond}", "$dcf\t{$src2, $src1|$src1, $src2}" , []>, T_MAP4, EVEX, Requires<[In64BitMode]> {
    81  def : Pat<(X86ccmp GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond, EFLAGS),
    82  (CCMP8rr GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond)>;
    83  def : Pat<(X86ccmp GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond, EFLAGS),
    84  (CCMP16rr GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond)>;
    85  def : Pat<(X86ccmp GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond, EFLAGS),
    86  (CCMP32rr GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond)>;
    87  def : Pat<(X86ccmp GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond, EFLAGS),
    88  (CCMP64rr GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond)>;
    [all …]

X86InstrFMA.td
    40  (ins RC:$src1, RC:$src2, RC:$src3),
    43  [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
    48  (ins RC:$src1, RC:$src2, x86memop:$src3),
    51  [(set RC:$dst, (VT (Op RC:$src2, RC:$src1,
    61  (ins RC:$src1, RC:$src2, RC:$src3),
    68  (ins RC:$src1, RC:$src2, x86memop:$src3),
    72  RC:$src1)))]>,
    81  (ins RC:$src1, RC:$src2, RC:$src3),
    90  (ins RC:$src1, RC:$src2, x86memop:$src3),
    93  [(set RC:$dst, (VT (Op (MemFrag addr:$src3), RC:$src1,
    [all …]
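
The FMA variants above differ only in which source comes from memory and in the order the operands are passed to the node; the per-element arithmetic is a single fused multiply-add. A C sketch using the standard fma() (the exact mapping of the 132/213/231 forms to operand roles is not shown in the excerpt and is deliberately left out):

    #include <assert.h>
    #include <math.h>

    /* One element of a packed fused multiply-add: a*b + c, rounded once. */
    static double fmadd(double a, double b, double c)
    {
        return fma(a, b, c);
    }

    int main(void)
    {
        assert(fmadd(2.0, 3.0, 1.0) == 7.0);
        return 0;
    }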

X86InstrCompiler.td
   882  def 16m : Ii8<0xBA, Form, (outs), (ins i16mem:$src1, i8imm:$src2),
   883  !strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
   884  [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 16)))]>,
   886  def 32m : Ii8<0xBA, Form, (outs), (ins i32mem:$src1, i8imm:$src2),
   887  !strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
   888  [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 32)))]>,
   890  def 64m : RIi8<0xBA, Form, (outs), (ins i64mem:$src1, i8imm:$src2),
   891  !strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
   892  [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 64)))]>,
   900  def 16rm : I<Opc8, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
    [all …]

X86InstrAVX512.td
   137  // ($src1) is already tied to $dst so we just use that for the preserved
   139  // $src1.
   149  !con((ins _.RC:$src1), NonTiedIns),
   150  !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
   151  !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
   154  (Select _.KRCWM:$mask, RHS, _.RC:$src1),
   167  !con((ins InVT.RC:$src1), NonTiedIns),
   168  !con((ins InVT.RC:$src1, InVT.KRCWM:$mask), NonTiedIns),
   169  !con((ins InVT.RC:$src1, InVT.KRCWM:$mask), NonTiedIns),
   172  (bitconvert InVT.RC:$src1)),
    [all …]
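
The masking helpers above wrap the computed result in (Select $mask, RHS, $src1), i.e. merge masking: lanes whose mask bit is set take the new value, the rest keep the tied $src1. A scalar C sketch of that select (lane count and element type are illustrative):

    #include <stdint.h>

    /* Merge-masked update of a 16-lane vector: lanes with a set mask bit
     * receive the freshly computed value, the others keep the value already
     * in src1 (which is tied to the destination). */
    static void masked_merge(int32_t dst[16], const int32_t src1[16],
                             const int32_t rhs[16], uint16_t mask)
    {
        for (int i = 0; i < 16; ++i)
            dst[i] = ((mask >> i) & 1) ? rhs[i] : src1[i];
    }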

X86InstrKL.td
    23  (ins VR128:$src1, opaquemem:$src2),
    24  "aesenc128kl\t{$src2, $src1|$src1, $src2}",
    25  [(set VR128:$dst, EFLAGS, (X86aesenc128kl VR128:$src1, addr:$src2))]>,
    28  (ins VR128:$src1, opaquemem:$src2),
    29  "aesdec128kl\t{$src2, $src1|$src1, $src2}",
    30  [(set VR128:$dst, EFLAGS, (X86aesdec128kl VR128:$src1, addr:$src2))]>,
    33  (ins VR128:$src1, opaquemem:$src2),
    34  "aesenc256kl\t{$src2, $src1|$src1, $src2}",
    35  [(set VR128:$dst, EFLAGS, (X86aesenc256kl VR128:$src1, addr:$src2))]>,
    38  (ins VR128:$src1, opaquemem:$src2),
    [all …]

X86InstrAsmAlias.td
    70  def : InstAlias<"ccmp"#Cond#"{b} $dcf\t{$src2, $src1|$src1, $src2}",
    71  (CCMP8rr GR8:$src1, GR8:$src2, cflags:$dcf, CC), 0>;
    72  def : InstAlias<"ccmp"#Cond#"{w} $dcf\t{$src2, $src1|$src1, $src2}",
    73  (CCMP16rr GR16:$src1, GR16:$src2, cflags:$dcf, CC), 0>;
    74  def : InstAlias<"ccmp"#Cond#"{l} $dcf\t{$src2, $src1|$src1, $src2}",
    75  (CCMP32rr GR32:$src1, GR32:$src2, cflags:$dcf, CC), 0>;
    76  def : InstAlias<"ccmp"#Cond#"{q} $dcf\t{$src2, $src1|$src1, $src2}",
    77  (CCMP64rr GR64:$src1, GR64:$src2, cflags:$dcf, CC), 0>;
    78  def : InstAlias<"ccmp"#Cond#"{b} $dcf\t{$src2, $src1|$src1, $src2}",
    79  (CCMP8rm GR8:$src1, i8mem:$src2, cflags:$dcf, CC), 0>;
    [all …]

X86InstrMisc.td
   617  def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
   618  "bt{w}\t{$src2, $src1|$src1, $src2}",
   619  [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
   621  def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
   622  "bt{l}\t{$src2, $src1|$src1, $src2}",
   623  [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
   625  def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
   626  "bt{q}\t{$src2, $src1|$src1, $src2}",
   627  [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
   637  def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
    [all …]
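
BT only produces EFLAGS: CF becomes the selected bit of $src1, with the bit index taken modulo the operand width in the register forms shown above. A C sketch of the 64-bit register form (the memory forms instead index into the addressed word in memory and are not modelled here):

    #include <stdint.h>

    /* Carry flag produced by a 64-bit register-register BT: bit
     * (idx mod 64) of src.  The 16/32-bit forms work the same way with a
     * smaller modulus. */
    static int bt64_cf(uint64_t src, uint64_t idx)
    {
        return (int)((src >> (idx & 63)) & 1);
    }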

X86InstrVMX.td
    19  def INVEPT32 : I<0x80, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
    20  "invept\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
    22  def INVEPT64 : I<0x80, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
    23  "invept\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
    25  def INVEPT64_EVEX : I<0xF0, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
    26  "invept\t{$src2, $src1|$src1, $src2}", []>,
    30  def INVVPID32 : I<0x81, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
    31  "invvpid\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
    33  def INVVPID64 : I<0x81, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
    34  "invvpid\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
    [all …]

/freebsd/contrib/llvm-project/clang/lib/Headers/

amxintrin.h
   139  /// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
   151  /// \param src1
   153  #define _tile_dpbssd(dst, src0, src1) \
   154  __builtin_ia32_tdpbssd((dst), (src0), (src1))
   158  /// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
   170  /// \param src1
   172  #define _tile_dpbsud(dst, src0, src1) \
   173  __builtin_ia32_tdpbsud((dst), (src0), (src1))
   177  /// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
   189  /// \param src1
    [all …]
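
Each dword step described in those comments multiplies four 8-bit values from src0 with the corresponding four 8-bit values from src1 and accumulates the four 32-bit products into a dword of dst; the variants differ only in which of the two byte sources is treated as signed. A scalar C sketch of the signed-by-signed step (not the intrinsic itself; the function name is made up):

    #include <stdint.h>

    /* One TDPBSSD building block: treat each 32-bit input as four signed
     * bytes, multiply them pairwise, and add the four 32-bit products into
     * the accumulator dword. */
    static int32_t dpbssd_dword(int32_t acc, uint32_t a, uint32_t b)
    {
        for (int i = 0; i < 4; ++i) {
            int32_t ai = (int8_t)(a >> (8 * i));
            int32_t bi = (int8_t)(b >> (8 * i));
            acc += ai * bi;
        }
        return acc;
    }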

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

AMDGPUInstrInfo.td
   208  // out = (src0 + src1 > 0xFFFFFFFF) ? 1 : 0
   211  // out = (src1 > src0) ? 1 : 0
   248  // src1 = Denominator, src2 = Numerator).
   253  // Special case divide fixup and flags(src0 = Quotient, src1 =
   273  // src1: dst - rat offset (aka pointer) in dwords
   350  SDTCisSameAs<3, 2>, // f32 src1
   422  def AMDGPUfp_class : PatFrags<(ops node:$src0, node:$src1),
   423  [(int_amdgcn_class node:$src0, node:$src1),
   424  (AMDGPUfp_class_impl node:$src0, node:$src1)]>;
   426  def AMDGPUfmed3 : PatFrags<(ops node:$src0, node:$src1, node:$src2),
    [all …]
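
The carry-out and borrow-out comments above (out = (src0 + src1 > 0xFFFFFFFF) ? 1 : 0 and out = (src1 > src0) ? 1 : 0) can be computed without widening by checking for wrap-around. A C sketch:

    #include <stdint.h>

    /* Carry out of a 32-bit unsigned add: the sum wrapped iff it is smaller
     * than either input. */
    static uint32_t addc_out(uint32_t src0, uint32_t src1)
    {
        return (uint32_t)(src0 + src1 < src0);
    }

    /* Borrow out of a 32-bit unsigned subtract (src0 - src1). */
    static uint32_t subb_out(uint32_t src0, uint32_t src1)
    {
        return (uint32_t)(src1 > src0);
    }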

/freebsd/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/

Execution.cpp
   105  Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
   108  static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
   119  static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
   130  static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
   141  static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
   152  static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
   156  Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
   159  Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
   169  Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
   175  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
    [all …]
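
executeFRemInst dispatches on the operand type and calls the C library's fmod for both float and double, which matches LLVM frem's rule that the result takes the sign of the dividend (remainder() would round the quotient to nearest instead). A trimmed-down C sketch of that shape, with an illustrative stand-in for GenericValue (the real type carries many more members):

    #include <math.h>

    /* Illustrative stand-in for the interpreter's GenericValue: only the two
     * floating-point members touched by executeFRemInst are modelled. */
    typedef struct {
        float  FloatVal;
        double DoubleVal;
    } Value;

    enum FPTy { FP_FLOAT, FP_DOUBLE };

    static void exec_frem(Value *Dest, Value Src1, Value Src2, enum FPTy Ty)
    {
        if (Ty == FP_FLOAT)
            Dest->FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
        else
            Dest->DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    }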

/freebsd/contrib/arm-optimized-routines/string/aarch64/

memcmp.S
    14  #define src1 x0
    39  ldp data1, data3, [src1]
    45  add src1end, src1, limit
    55  ldp data1, data3, [src1, 16]
    63  ldp data1, data3, [src1, 32]
    68  add src1, src1, 32
    96  add src1end, src1, limit
    99  ldr data1, [src1]
   108  ldr data1w, [src1]
   116  ldrh data1w, [src1]
    [all …]
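
The routine above compares 16-32 bytes per iteration with ldp pairs and derives src1end from limit. A portable C sketch of the same chunked-compare structure (8 bytes at a time here; the byte-wise ordering pass stands in for the byte-reverse-and-subtract sequence the assembly uses):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Chunked memcmp sketch: compare 8 bytes at a time and drop to a byte
     * loop only to order the first differing chunk. */
    static int memcmp_chunks(const void *s1, const void *s2, size_t n)
    {
        const unsigned char *a = s1, *b = s2;
        uint64_t x, y;

        while (n >= 8) {
            memcpy(&x, a, 8);       /* unaligned-safe 8-byte loads */
            memcpy(&y, b, 8);
            if (x != y)
                break;              /* first difference is in this chunk */
            a += 8; b += 8; n -= 8;
        }
        for (size_t i = 0; i < (n < 8 ? n : 8); i++) {
            if (a[i] != b[i])
                return a[i] - b[i];
        }
        return 0;
    }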

strcmp.S
    20  #define src1 x0
    56  sub off2, src2, src1
    58  and tmp, src1, 7
    66  ldr data2, [src1, off2]
    67  ldr data1, [src1], 8
   110  bic src1, src1, 7
   111  ldr data2, [src1, off2]
   112  ldr data1, [src1], 8
   121  /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
   125  ldrb data1w, [src1], 1
    [all …]
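
The "sub off2, src2, src1" / "ldr data2, [src1, off2]" sequence keeps a single moving pointer and addresses the second string at a fixed offset from it. A byte-wise C sketch of that structure (the assembly works on aligned 8-byte words and finds the NUL terminator in-register; note that subtracting pointers into different objects is fine in assembly but not in strictly conforming C, so this is illustration only):

    #include <stddef.h>

    static int strcmp_offset(const char *s1, const char *s2)
    {
        ptrdiff_t off2 = s2 - s1;   /* single offset, one moving pointer */
        const unsigned char *p = (const unsigned char *)s1;

        for (;; p++) {
            unsigned char c1 = *p;
            unsigned char c2 = *(p + off2);
            if (c1 != c2)
                return c1 - c2;
            if (c1 == '\0')
                return 0;           /* both strings ended together */
        }
    }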

/freebsd/sys/arm64/arm64/

memcmp.S
    19  #define src1 x0
    38  ldr data1, [src1], 8
    46  ldr data1, [src1, limit]
    51  ldr data1, [src1], 8
    61  /* We overlap loads between 0-32 bytes at either side of SRC1 when we
    66  /* Align src1 and adjust src2 with bytes not yet done. */
    67  and tmp1, src1, 15
    69  sub src1, src1, tmp1
    72  /* Loop performing 16 bytes per iteration using aligned src1.
    77  ldp data1, data1h, [src1], 16
    [all …]

strcmp.S
    24  #define src1 x0
    58  sub off2, src2, src1
    60  and tmp, src1, 7
    68  ldr data2, [src1, off2]
    69  ldr data1, [src1], 8
   112  bic src1, src1, 7
   113  ldr data2, [src1, off2]
   114  ldr data1, [src1], 8
   123  /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
   127  ldrb data1w, [src1], 1
    [all …]