Searched refs:VDUP (Results 1 – 9 of 9) sorted by relevance
Directory: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/
ARMScheduleR52.td
   798  def : InstRW<[R52WriteFPALU_F3, R52Read_ISS], (instregex "VDUP(8|16|32)d")>;
   799  def : InstRW<[R52Write2FPALU_F3, R52Read_ISS], (instregex "VDUP(8|16|32)q")>;
ARMISelLowering.h
   199  VDUP,  (enumerator)
ARMISelLowering.cpp
  1791  MAKE_CASE(ARMISD::VDUP)  (in getTargetNodeName())
  7945  SDValue VDup = DAG.getNode(ARMISD::VDUP, dl, DupVT, Const);  (in LowerBUILD_VECTOR())
  7989  SDValue VDup = DAG.getNode(ARMISD::VDUP, dl, DupVT, Const);  (in LowerBUILD_VECTOR())
  8074  N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);  (in LowerBUILD_VECTOR())
  8108  return DAG.getNode(ARMISD::VDUP, dl, VT, Val);  (in LowerBUILD_VECTOR())
  8841  return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));  (in LowerVECTOR_SHUFFLE())
  8855  return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));  (in LowerVECTOR_SHUFFLE())
 14086  if (VDup->getOpcode() != ARMISD::VDUP)  (in PerformSUBCombine())
 14100  return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate);  (in PerformSUBCombine())
 15474  if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP)  (in PerformVCMPCombine())
 [all …]
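The ARMISelLowering.cpp hits above show where the ARMISD::VDUP node is created: splat-like BUILD_VECTORs, single-source VECTOR_SHUFFLEs, and a sub-of-splat DAG combine all funnel into DAG.getNode(ARMISD::VDUP, ...). The following is a minimal sketch of the common shape of those matches, not the file's actual code; lowerSplatToVDUP is a hypothetical helper and it only builds inside the LLVM ARM backend tree.

    // Minimal sketch (hypothetical helper, in-tree LLVM ARM backend only):
    // when every lane of a BUILD_VECTOR holds the same value, the whole node
    // can be replaced by a single ARMISD::VDUP of that scalar.
    #include "ARMISelLowering.h"                 // declares ARMISD::VDUP
    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue lowerSplatToVDUP(SDValue Op, SelectionDAG &DAG) {
      SDLoc dl(Op);
      EVT VT = Op.getValueType();
      // Assume the caller already checked that Op is a splat; the replicated
      // scalar is taken from lane 0 here purely for illustration.
      SDValue Splat = Op.getOperand(0);
      // ARMISD::VDUP takes one scalar operand and yields a vector with that
      // value broadcast into every element.
      return DAG.getNode(ARMISD::VDUP, dl, VT, Splat);
    }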
ARMFeatures.td
   252  // True if VMOV will be favored over VDUP.
ARMScheduleSwift.td
   635  (instregex "VDUP(8|16|32)")>;
ARMScheduleA57.td
  1202  def : InstRW<[A57Write_8cyc_1L_1V], (instregex "VDUP(8|16|32)(d|q)")>;
ARMInstrNEON.td
  6633  // VDUP : Vector Duplicate (from ARM core register to all elements)
  6652  // ARMvdup patterns for uarchs with fast VDUP.32.
  6658  // ARMvdup patterns for uarchs with slow VDUP.32 - use VMOVDRR instead.
  6664  // VDUP : Vector Duplicate Lane (from scalar to all elements)
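The ARMInstrNEON.td comments distinguish the two VDUP forms: duplicating an ARM core register into every element, and duplicating one lane of an existing vector. As a purely illustrative aside (not code from the tree), the same two forms surface in source code through the standard ACLE NEON intrinsics:

    // Illustration only: the two VDUP flavours via ACLE NEON intrinsics.
    // Requires an ARM target with NEON and <arm_neon.h>.
    #include <arm_neon.h>

    int32x4_t splat_from_core_register(int32_t x) {
      // Typically selected to VDUP.32 qN, rM (or to VMOV on cores where
      // VDUP.32 is flagged as slow).
      return vdupq_n_s32(x);
    }

    int32x4_t splat_from_lane(int32x2_t v) {
      // Typically selected to VDUP.32 qN, dM[1]: broadcast lane 1 to all lanes.
      return vdupq_lane_s32(v, 1);
    }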
ARMISelDAGToDAG.cpp
  3206  case ARMISD::VDUP: {  (in transformFixedFloatingPointConversion())
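The single ARMISelDAGToDAG.cpp match handles ARMISD::VDUP inside transformFixedFloatingPointConversion, where a splatted scale factor has to be traced back to the scalar it duplicates. A hedged sketch of that general pattern follows (hypothetical lookThroughVDUP helper, same in-tree includes as the earlier sketch, not the real function):

    // Sketch (hypothetical helper, LLVM ARM backend context): peel an
    // ARMISD::VDUP so later matching can run on the scalar being splatted.
    static SDValue lookThroughVDUP(SDValue N) {
      switch (N->getOpcode()) {
      case ARMISD::VDUP:
        return N->getOperand(0);   // the scalar broadcast into every lane
      default:
        return SDValue();          // not a splat form we recognise
      }
    }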
ARMInstrInfo.td
   255  def ARMvdup : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;