/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/

SelectionDAGNodes.h
  1499    EVT MemVT, MachineMemOperand *MMO)
  1500        : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
  2400    SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
  2402        : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
  2434    ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
  2436        : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
  2462    ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
  2464        : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
  2496    ISD::MemIndexedMode AM, EVT MemVT,
  2498        : MemSDNode(NodeTy, Order, DL, VTs, MemVT, MMO) {
  [all …]

TargetLowering.h
   671    virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,    in storeOfVectorConstantIsCheap() argument
   680    virtual bool mergeStoresAfterLegalization(EVT MemVT) const {    in mergeStoresAfterLegalization() argument
   685    virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,    in canMergeStoresTo() argument
  1440    EVT MemVT) const {    in getLoadExtAction() argument
  1441    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;    in getLoadExtAction()
  1443    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;    in getLoadExtAction()
  1451    bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {    in isLoadExtLegal() argument
  1452    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;    in isLoadExtLegal()
  1457    bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {    in isLoadExtLegalOrCustom() argument
  1458    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||    in isLoadExtLegalOrCustom()
  [all …]

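The getLoadExtAction()/isLoadExtLegal() hits above are the query side of a per-(ExtType, ValVT, MemVT) legality table. As a hedged illustration of how that table is typically used (MyTargetLowering and the chosen types are placeholders, not drawn from the indexed files), a target seeds it in its constructor and DAG combines consult it before forming an extending load:

    // Minimal sketch, not an indexed file: MyTargetLowering is hypothetical.
    #include "llvm/CodeGen/TargetLowering.h"
    using namespace llvm;

    class MyTargetLowering : public TargetLowering {
    public:
      explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {
        // An i32 result zero-/sign-extended from an i8 or i16 memory type is
        // natively supported, so extending loads of these shapes stay legal.
        setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i8, Legal);
        setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Legal);
      }
    };

A combine then guards the fold with TLI.isLoadExtLegal(ISD::ZEXTLOAD, ValVT, MemVT), which reduces to the getLoadExtAction() == Legal check visible at hit 1452.
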
SelectionDAG.h
  1296    SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
  1302    SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
  1307    SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
  1312    SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
  1322    EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
  1329    EVT MemVT, MachinePointerInfo PtrInfo,
  1335    return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
  1336    Alignment.value_or(getEVTAlign(MemVT)), Flags,
  1341    ArrayRef<SDValue> Ops, EVT MemVT,
  1374    SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
  [all …]

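In these builders, MemVT records the width the node actually touches in memory, independently of its result types. A hedged call-site sketch (the MYTGTISD::LOAD_WIDE opcode and operand layout are invented for illustration; Chain, BasePtr and DL are assumed in scope of a custom lowering):

    // Sketch only: MYTGTISD::LOAD_WIDE is a hypothetical target node.
    SDVTList VTs = DAG.getVTList(MVT::v4i32, MVT::Other);  // value + chain results
    SDValue Ops[] = {Chain, BasePtr};
    SDValue Load = DAG.getMemIntrinsicNode(
        MYTGTISD::LOAD_WIDE, DL, VTs, Ops,
        /*MemVT=*/MVT::v4i32,                // 16 bytes are read from memory
        MachinePointerInfo(), Align(16), MachineMemOperand::MOLoad);
    SDValue OutChain = Load.getValue(1);
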
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

R600ISelLowering.h
    49    bool canMergeStoresTo(unsigned AS, EVT MemVT,
    57    bool canCombineTruncStore(EVT ValVT, EVT MemVT,    in canCombineTruncStore() argument
    63    return isTruncStoreLegal(ValVT, MemVT);    in canCombineTruncStore()

R600ISelLowering.cpp
  1039    EVT MemVT = Store->getMemoryVT();    in lowerPrivateTruncStore() local
  1072    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);    in lowerPrivateTruncStore()
  1113    EVT MemVT = StoreNode->getMemoryVT();    in LowerSTORE() local
  1129    NewChain, DL, Value, Ptr, StoreNode->getPointerInfo(), MemVT,    in LowerSTORE()
  1139    if (Alignment < MemVT.getStoreSize() &&    in LowerSTORE()
  1140    !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,    in LowerSTORE()
  1155    if (MemVT == MVT::i8) {    in LowerSTORE()
  1158    assert(MemVT == MVT::i16);    in LowerSTORE()
  1186    Op->getVTList(), Args, MemVT,    in LowerSTORE()
  1206    if (MemVT.bitsLT(MVT::i32))    in LowerSTORE()
  [all …]

SIISelLowering.h
    52    SDValue lowerKernargMemParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
   129    ArrayRef<SDValue> Ops, EVT MemVT,
   143    SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Val,
   179    EVT MemVT,
   323    bool canMergeStoresTo(unsigned AS, EVT MemVT,

AMDGPUISelLowering.cpp
   200    for (auto MemVT :    in AMDGPUTargetLowering()
   202    setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MemVT,    in AMDGPUTargetLowering()
   958    bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,    in storeOfVectorConstantIsCheap() argument
  1190    EVT MemVT = ArgVT;    in analyzeFormalArgumentsCompute() local
  1199    MemVT = RegisterVT;    in analyzeFormalArgumentsCompute()
  1201    MemVT = ArgVT;    in analyzeFormalArgumentsCompute()
  1209    MemVT = RegisterVT;    in analyzeFormalArgumentsCompute()
  1214    MemVT = ArgVT.getScalarType();    in analyzeFormalArgumentsCompute()
  1217    MemVT = RegisterVT;    in analyzeFormalArgumentsCompute()
  1222    MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);    in analyzeFormalArgumentsCompute()
  [all …]

SIISelLowering.cpp
  1677    bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,    in canMergeStoresTo() argument
  1680    return (MemVT.getSizeInBits() <= 4 * 32);    in canMergeStoresTo()
  1683    return (MemVT.getSizeInBits() <= MaxPrivateBits);    in canMergeStoresTo()
  1686    return (MemVT.getSizeInBits() <= 2 * 32);    in canMergeStoresTo()
  2009    SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,    in convertArgType() argument
  2015    VT.getVectorNumElements() != MemVT.getVectorNumElements()) {    in convertArgType()
  2017    EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),    in convertArgType()
  2025    VT.bitsLT(MemVT)) {    in convertArgType()
  2027    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));    in convertArgType()
  2030    if (MemVT.isFloatingPoint())    in convertArgType()
  [all …]

AMDGPUISelLowering.h
   225    bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,

/freebsd/contrib/llvm-project/llvm/lib/Target/VE/

VEISelLowering.cpp
  1378    EVT MemVT = LdNode->getMemoryVT();    in lowerLoadI1() local
  1379    if (MemVT == MVT::v256i1 || MemVT == MVT::v4i64) {    in lowerLoadI1()
  1381    SDNode *VM = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MemVT);    in lowerLoadI1()
  1400    } else if (MemVT == MVT::v512i1 || MemVT == MVT::v8i64) {    in lowerLoadI1()
  1402    SDNode *VM = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MemVT);    in lowerLoadI1()
  1429    EVT MemVT = LdNode->getMemoryVT();    in lowerLOAD() local
  1432    if (Subtarget->enableVPU() && MemVT.isVector() && !isMaskType(MemVT))    in lowerLOAD()
  1442    if (MemVT == MVT::f128)    in lowerLOAD()
  1444    if (isMaskType(MemVT))    in lowerLOAD()
  1502    EVT MemVT = StNode->getMemoryVT();    in lowerStoreI1() local
  [all …]

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/

SelectionDAG.cpp
  8635    SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,    in getAtomic() argument
  8639    ID.AddInteger(MemVT.getRawBits());    in getAtomic()
  8650    VTList, MemVT, MMO);    in getAtomic()
  8659    EVT MemVT, SDVTList VTs, SDValue Chain,    in getAtomicCmpSwap() argument
  8667    return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);    in getAtomicCmpSwap()
  8670    SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,    in getAtomic() argument
  8699    return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);    in getAtomic()
  8702    SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,    in getAtomic() argument
  8709    return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);    in getAtomic()
  8726    EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,    in getMemIntrinsicNode() argument
  [all …]

DAGCombiner.cpp
   754    EVT &MemVT, unsigned ShAmt = 0);
   779    EVT MemVT, unsigned NumStores,
   807    EVT MemVT, SDNode *Root, bool AllowVectors);
   814    unsigned NumConsecutiveStores, EVT MemVT,
   820    unsigned NumConsecutiveStores, EVT MemVT,
  1433    EVT MemVT = LD->getMemoryVT();    in PromoteOperand() local
  1439    MemVT, LD->getMemOperand());    in PromoteOperand()
  1670    EVT MemVT = LD->getMemoryVT();    in PromoteLoad() local
  1675    MemVT, LD->getMemOperand());    in PromoteLoad()
  6455    ISD::LoadExtType ExtType, EVT &MemVT,    in isLegalNarrowLdSt() argument
  [all …]

SelectionDAGBuilder.cpp
  2903    EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());    in visitSwitchCase() local
  2922    if (CondLHS.getValueType() != MemVT) {    in visitSwitchCase()
  2923    CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);    in visitSwitchCase()
  2924    CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);    in visitSwitchCase()
  3647    EVT MemVT =    in visitICmp() local
  3653    if (Op1.getValueType() != MemVT) {    in visitICmp()
  3654    Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);    in visitICmp()
  3655    Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);    in visitICmp()
  5060    MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();    in visitAtomicCmpXchg() local
  5061    SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);    in visitAtomicCmpXchg()
  [all …]

LegalizeVectorTypes.cpp
  1336    void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,    in IncrementPointer()
  1340    unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinValue() / 8;    in IncrementPointer()
  1342    if (MemVT.isScalableVector()) {    in IncrementPointer()
  3046    EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),    in SplitVecRes_VP_REVERSE()
  3048    SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment);    in SplitVecRes_VP_REVERSE()
  3073    EVL, MemVT, StoreMMO, ISD::UNINDEXED);    in SplitVecRes_VP_REVERSE()
  7315    for (EVT MemVT : reverse(MVT::integer_valuetypes())) {    in findMemType()
  7316    unsigned MemVTWidth = MemVT.getSizeInBits();    in findMemType()
  7317    if (MemVT.getSizeInBits() <= WidenEltWidth)    in findMemType()
  7319    auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT);    in findMemType()
  1332    IncrementPointer(MemSDNode *N, EVT MemVT, MachinePointerInfo &MPI, SDValue &Ptr, uint64_t *ScaledOffset)    IncrementPointer() argument
  3042    EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),    SplitVecRes_VP_REVERSE() local
  7311    for (EVT MemVT : reverse(MVT::integer_valuetypes())) {    findMemType() local
  7332    for (EVT MemVT : reverse(MVT::vector_valuetypes())) {    findMemType() local
  7488    for (EVT MemVT : MemVTs) {    GenWidenVectorLoads() local
  [all …]

LegalizeDAG.cpp
   500    EVT MemVT = ST->getMemoryVT();    in LegalizeStoreOps() local
   502    if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,    in LegalizeStoreOps()
   611    EVT MemVT = ST->getMemoryVT();    in LegalizeStoreOps() local
   614    if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,    in LegalizeStoreOps()
   672    EVT MemVT = LD->getMemoryVT();    in LegalizeLoadOps() local
   676    if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,    in LegalizeLoadOps()
   859    EVT MemVT = LD->getMemoryVT();    in LegalizeLoadOps() local
   861    if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,    in LegalizeLoadOps()
  1516    EVT MemVT = isa<BuildVectorSDNode>(Node) ? VT.getVectorElementType()    in ExpandVectorBuildThroughStack() local
  1526    unsigned TypeByteSize = MemVT.getSizeInBits() / 8;    in ExpandVectorBuildThroughStack()
  [all …]

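The LegalizeStoreOps()/LegalizeLoadOps() hits gate unaligned accesses on the target's alignment hook. Pieced together as a sketch (a paraphrase of the pattern visible above, not a verbatim excerpt; ST is the StoreSDNode being legalized and TLI/DAG are assumed in scope):

    EVT MemVT = ST->getMemoryVT();
    const DataLayout &DL = DAG.getDataLayout();
    if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
                                            *ST->getMemOperand())) {
      // The target rejects this (MemVT, alignment, address space) combination;
      // fall back to an expanded sequence, e.g. TLI.expandUnalignedStore(ST, DAG).
    }
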
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

X86ISelLowering.h
  1104    bool mergeStoresAfterLegalization(EVT MemVT) const override {    in mergeStoresAfterLegalization() argument
  1105    return !MemVT.isVector();    in mergeStoresAfterLegalization()
  1108    bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
  1469    bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem,    in storeOfVectorConstantIsCheap() argument

X86ISelDAGToDAG.cpp
  1383    MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;    in PreprocessISelDAG() local
  1384    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);    in PreprocessISelDAG()
  1393    CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);    in PreprocessISelDAG()
  1395    MemTmp, MPI, MemVT);    in PreprocessISelDAG()
  1439    MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;    in PreprocessISelDAG() local
  1440    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);    in PreprocessISelDAG()
  1453    Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,    in PreprocessISelDAG()
  1462    assert(SrcVT == MemVT && "Unexpected VT!");    in PreprocessISelDAG()
  1471    X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,    in PreprocessISelDAG()
  1479    assert(DstVT == MemVT && "Unexpected VT!");    in PreprocessISelDAG()
  [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64ISelLowering.cpp
  6197    EVT MemVT = MGT->getMemoryVT();    in LowerMGATHER() local
  6206    DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,    in LowerMGATHER()
  6218    if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {    in LowerMGATHER()
  6226    return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,    in LowerMGATHER()
  6237    MemVT = MemVT.changeVectorElementTypeToInteger();    in LowerMGATHER()
  6259    MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());    in LowerMGATHER()
  6268    DAG.getMaskedGather(DAG.getVTList(ContainerVT, MVT::Other), MemVT, DL,    in LowerMGATHER()
  6296    EVT MemVT = MSC->getMemoryVT();    in LowerMSCATTER() local
  6306    if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {    in LowerMSCATTER()
  6314    return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,    in LowerMSCATTER()
  [all …]

AArch64ISelLowering.h
   842    bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,    in canMergeStoresTo() argument
   850    return (MemVT.getSizeInBits() <= 64);    in canMergeStoresTo()

AArch64SVEInstrInfo.td
  2999    SDPatternOperator Load, ValueType PredTy, ValueType MemVT, ComplexPattern AddrCP> {
  3002    def : Pat<(Ty (Load (PredTy PPR:$gp), (AddrCP GPR64:$base, GPR64:$offset), MemVT)),
  3008    …def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
  3013    def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
  3047    …ass ldnf1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
  3050    …def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
  3055    def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
  3087    …tion I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT, ComplexPattern Ad…
  3090    def : Pat<(Ty (Load (PredTy PPR:$gp), (AddrCP GPR64:$base, GPR64:$offset), MemVT)),
  3095    def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
  [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/

SystemZISelDAGToDAG.cpp
  1401    EVT MemVT = StoreNode->getMemoryVT();    in tryFoldLoadStoreIntoMemOperand() local
  1411    if (MemVT == MVT::i32)    in tryFoldLoadStoreIntoMemOperand()
  1413    else if (MemVT == MVT::i64)    in tryFoldLoadStoreIntoMemOperand()
  1422    if (MemVT == MVT::i32)    in tryFoldLoadStoreIntoMemOperand()
  1424    else if (MemVT == MVT::i64)    in tryFoldLoadStoreIntoMemOperand()
  1446    Operand = CurDAG->getTargetConstant(OperandV, DL, MemVT);    in tryFoldLoadStoreIntoMemOperand()

SystemZISelLowering.cpp
  4646    EVT MemVT = Node->getMemoryVT();    in lowerATOMIC_LOAD_SUB() local
  4647    if (MemVT == MVT::i32 || MemVT == MVT::i64) {    in lowerATOMIC_LOAD_SUB()
  4649    assert(Op.getValueType() == MemVT && "Mismatched VTs");    in lowerATOMIC_LOAD_SUB()
  4655    DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT), Src2);    in lowerATOMIC_LOAD_SUB()
  4656    return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,    in lowerATOMIC_LOAD_SUB()
  7044    EVT MemVT = SN->getMemoryVT();    in combineSTORE() local
  7049    if (MemVT.isInteger() && SN->isTruncatingStore()) {    in combineSTORE()
  7051    combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {    in combineSTORE()
  7077    Ops, MemVT, SN->getMemOperand());    in combineSTORE()
  7093    Ops, MemVT, SN->getMemOperand());    in combineSTORE()
  [all …]

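The lowerATOMIC_LOAD_SUB() hits arrive in fragments; reassembled as a sketch (a paraphrase, not the verbatim SystemZ code), the i32/i64 path negates the operand and re-emits the node as an atomic add with the same MemVT and memory operand:

    // Paraphrase of the hits above; Op and DAG are assumed in scope.
    AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
    EVT MemVT = Node->getMemoryVT();
    SDLoc DL(Op);
    if (MemVT == MVT::i32 || MemVT == MVT::i64) {
      assert(Op.getValueType() == MemVT && "Mismatched VTs");
      // sub x, y  ==>  add x, -y on the atomic RMW node.
      SDValue Src2 = Node->getVal();
      SDValue NegSrc2 =
          DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT), Src2);
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, Node->getChain(),
                           Node->getBasePtr(), NegSrc2, Node->getMemOperand());
    }
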
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/

ARMISelLowering.h
   689    bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,    in canMergeStoresTo() argument
   692    return (MemVT.getSizeInBits() <= 32);    in canMergeStoresTo()

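The SI, X86, AArch64 and ARM entries above all override the same canMergeStoresTo() hook declared in TargetLowering.h to cap the width the DAG store merger may form. A hedged sketch of such an override (MyTargetLowering is a placeholder, and the trailing const MachineFunction & parameter is assumed from the in-tree overrides):

    // Sketch only: allow the store merger to form at most 128-bit stores.
    bool MyTargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                                            const MachineFunction &MF) const {
      return MemVT.getSizeInBits() <= 128;
    }
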
/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/

MipsISelLowering.cpp
  2689    EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();    in createLoadLR() local
  2699    return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,    in createLoadLR()
  2706    EVT MemVT = LD->getMemoryVT();    in lowerLOAD() local
  2712    if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||    in lowerLOAD()
  2713    ((MemVT != MVT::i32) && (MemVT != MVT::i64)))    in lowerLOAD()
  2771    EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();    in createStoreLR() local
  2780    return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,    in createStoreLR()
  2832    EVT MemVT = SD->getMemoryVT();    in lowerSTORE() local
  2836    (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&    in lowerSTORE()
  2837    ((MemVT == MVT::i32) || (MemVT == MVT::i64))    in lowerSTORE()

/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/

PPCISelLowering.cpp
  3012    EVT MemVT = LD->getMemoryVT();    in usePartialVectorLoads() local
  3013    if (!MemVT.isSimple())    in usePartialVectorLoads()
  3015    switch(MemVT.getSimpleVT().SimpleTy) {    in usePartialVectorLoads()
  8536    bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,    in canReuseLoadAddress() argument
  8560    if (LD->getMemoryVT() != MemVT)    in canReuseLoadAddress()
  9424    EVT MemVT = InputNode->getMemoryVT();    in isValidSplatLoad() local
  9430    (MemVT == Ty.getVectorElementType()))    in isValidSplatLoad()
  9436    if (MemVT == MVT::i32) {    in isValidSplatLoad()
 11227    EVT MemVT = AtomicNode->getMemoryVT();    in LowerATOMIC_CMP_SWAP() local
 11228    if (MemVT.getSizeInBits() >= 32)    in LowerATOMIC_CMP_SWAP()
  [all …]