Searched refs:LdSt (Results 1 – 23 of 23) sorted by relevance

/freebsd/contrib/llvm-project/llvm/lib/Target/Lanai/
LanaiSchedule.td 55 def LdSt : ProcResource<1> { let BufferSize = 0; }
64 def : WriteRes<WriteLD, [LdSt]> { let Latency = 2; }
65 def : WriteRes<WriteST, [LdSt]> { let Latency = 2; }
66 def : WriteRes<WriteLDSW, [LdSt]> { let Latency = 2; }
67 def : WriteRes<WriteSTSW, [LdSt]> { let Latency = 4; }
LanaiInstrInfo.cpp 755 const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, in getMemOperandWithOffsetWidth() argument
759 if (LdSt.getNumOperands() != 4) in getMemOperandWithOffsetWidth()
761 if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm() || in getMemOperandWithOffsetWidth()
762 !(LdSt.getOperand(3).isImm() && LdSt.getOperand(3).getImm() == LPAC::ADD)) in getMemOperandWithOffsetWidth()
765 switch (LdSt.getOpcode()) { in getMemOperandWithOffsetWidth()
786 BaseOp = &LdSt.getOperand(1); in getMemOperandWithOffsetWidth()
787 Offset = LdSt.getOperand(2).getImm(); in getMemOperandWithOffsetWidth()
796 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, in getMemOperandsWithOffsetWidth() argument
799 switch (LdSt.getOpcode()) { in getMemOperandsWithOffsetWidth()
813 if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI)) in getMemOperandsWithOffsetWidth()
LanaiInstrInfo.h 71 const MachineInstr &LdSt,
76 bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
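The Lanai hits above are one target's implementation of the TargetInstrInfo memory-operand hooks. For orientation, a minimal sketch of how a generic pass might call the plural hook; dumpBaseAndOffset is a hypothetical name, and the signature follows the prototype shown in the RISCV result further down:

    // Minimal sketch, not in-tree code. Assumes an LLVM CodeGen context.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/MemoryLocation.h" // LocationSize
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"
    #include "llvm/Support/Debug.h"

    using namespace llvm;

    // Hypothetical helper: print base operand(s) and offset of a memory op.
    static void dumpBaseAndOffset(const MachineInstr &MI,
                                  const TargetInstrInfo &TII,
                                  const TargetRegisterInfo &TRI) {
      SmallVector<const MachineOperand *, 4> BaseOps;
      int64_t Offset = 0;
      bool OffsetIsScalable = false;
      LocationSize Width = LocationSize::precise(0);
      // Returns false when the target cannot express the address as
      // base + immediate offset.
      if (TII.getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
                                            OffsetIsScalable, Width, &TRI))
        for (const MachineOperand *Base : BaseOps)
          dbgs() << "base " << *Base << " offset " << Offset << "\n";
    }

A false return simply means the target could not decompose the address; Lanai, per the hit above, bails out unless the address mode is a base register plus immediate combined with an LPAC::ADD ALU op.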
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/
PPCInstrInfo.cpp 2838 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, in getMemOperandsWithOffsetWidth() argument
2843 if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI)) in getMemOperandsWithOffsetWidth()
2849 static bool isLdStSafeToCluster(const MachineInstr &LdSt, in isLdStSafeToCluster() argument
2852 if (LdSt.hasOrderedMemoryRef() || LdSt.getNumExplicitOperands() != 3) in isLdStSafeToCluster()
2855 if (LdSt.getOperand(2).isFI()) in isLdStSafeToCluster()
2858 assert(LdSt.getOperand(2).isReg() && "Expected a reg operand."); in isLdStSafeToCluster()
2861 if (LdSt.modifiesRegister(LdSt.getOperand(2).getReg(), TRI)) in isLdStSafeToCluster()
5528 const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset, in getMemOperandWithOffsetWidth() argument
5530 if (!LdSt.mayLoadOrStore() || LdSt.getNumExplicitOperands() != 3) in getMemOperandWithOffsetWidth()
5534 if (!LdSt.getOperand(1).isImm() || in getMemOperandWithOffsetWidth()
[all …]
PPCInstrInfo.h 558 bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
568 const MachineInstr &LdSt,
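The isLdStSafeToCluster hit shows the checks PowerPC applies before letting two memory ops cluster in the scheduler. A sketch of the same pattern under an illustrative name (isSimpleBaseImmLdSt is not the in-tree symbol):

    #include <cassert>
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"

    using namespace llvm;

    static bool isSimpleBaseImmLdSt(const MachineInstr &MI,
                                    const TargetRegisterInfo *TRI) {
      // Ordered (volatile/atomic) accesses and unusual operand counts
      // disqualify the instruction.
      if (MI.hasOrderedMemoryRef() || MI.getNumExplicitOperands() != 3)
        return false;
      // A frame-index base is resolved later and is fine to cluster.
      if (MI.getOperand(2).isFI())
        return true;
      assert(MI.getOperand(2).isReg() && "Expected a reg operand.");
      // An access that writes its own base register cannot be clustered.
      if (MI.modifiesRegister(MI.getOperand(2).getReg(), TRI))
        return false;
      return true;
    }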
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/
PPCInstructionSelector.cpp 730 GLoadStore &LdSt = cast<GLoadStore>(I); in select() local
731 LLT PtrTy = MRI.getType(LdSt.getPointerReg()); in select()
741 I.getOpcode(), RBI.getRegBank(LdSt.getReg(0), MRI, TRI)->getID(), in select()
742 LdSt.getMemSizeInBits().getValue()); in select()
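The selector hit above views a generic load/store through the GLoadStore wrapper from GenericMachineInstrs.h. A self-contained sketch of those accessors; memSizeInBits is a hypothetical helper, and getValue() assumes a fixed, non-scalable size:

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    static unsigned memSizeInBits(MachineInstr &I, MachineRegisterInfo &MRI) {
      auto &LdSt = cast<GLoadStore>(I); // only valid for G_LOAD/G_STORE & co.
      LLT PtrTy = MRI.getType(LdSt.getPointerReg()); // pointer operand's LLT
      (void)PtrTy; // a selector would key the register class off this
      return LdSt.getMemSizeInBits().getValue();
    }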
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/
CombinerHelper.cpp 1143 bool CombinerHelper::isIndexedLoadStoreLegal(GLoadStore &LdSt) const { in isIndexedLoadStoreLegal()
1145 LLT PtrTy = MRI.getType(LdSt.getPointerReg()); in isIndexedLoadStoreLegal()
1146 LLT Ty = MRI.getType(LdSt.getReg(0)); in isIndexedLoadStoreLegal()
1147 LLT MemTy = LdSt.getMMO().getMemoryType(); in isIndexedLoadStoreLegal()
1151 unsigned IndexedOpc = getIndexedOpc(LdSt.getOpcode()); in isIndexedLoadStoreLegal()
1167 bool CombinerHelper::findPostIndexCandidate(GLoadStore &LdSt, Register &Addr, in findPostIndexCandidate() argument
1177 Register Ptr = LdSt.getPointerReg(); in findPostIndexCandidate()
1182 if (!isIndexedLoadStoreLegal(LdSt)) in findPostIndexCandidate()
1188 MachineInstr *StoredValDef = getDefIgnoringCopies(LdSt.getReg(0), MRI); in findPostIndexCandidate()
1209 !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(), Offset, in findPostIndexCandidate()
[all …]
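findPostIndexCandidate starts from the access's pointer register and looks for pointer arithmetic it could fold into an indexed form. A rough sketch of that first scan; findPtrAddUser is an illustrative name, and the in-tree combine additionally verifies legality via isIndexedLoadStoreLegal and TLI.isIndexingLegal, per the lines above:

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    static GPtrAdd *findPtrAddUser(GLoadStore &LdSt,
                                   MachineRegisterInfo &MRI) {
      Register Ptr = LdSt.getPointerReg();
      // Walk non-debug users of the pointer looking for a G_PTR_ADD.
      for (MachineInstr &Use : MRI.use_nodbg_instructions(Ptr))
        if (auto *PtrAdd = dyn_cast<GPtrAdd>(&Use))
          return PtrAdd; // candidate base for an indexed load/store
      return nullptr;
    }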
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
SIInstrInfo.cpp 357 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, in getMemOperandsWithOffsetWidth() argument
360 if (!LdSt.mayLoadOrStore()) in getMemOperandsWithOffsetWidth()
363 unsigned Opc = LdSt.getOpcode(); in getMemOperandsWithOffsetWidth()
368 if (isDS(LdSt)) { in getMemOperandsWithOffsetWidth()
369 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr); in getMemOperandsWithOffsetWidth()
370 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); in getMemOperandsWithOffsetWidth()
384 Width = getOpSize(LdSt, DataOpIdx); in getMemOperandsWithOffsetWidth()
390 getNamedOperand(LdSt, AMDGPU::OpName::offset0); in getMemOperandsWithOffsetWidth()
392 getNamedOperand(LdSt, AMDGPU::OpName::offset1); in getMemOperandsWithOffsetWidth()
403 if (LdSt.mayLoad()) in getMemOperandsWithOffsetWidth()
[all …]
SIInstrInfo.h 241 const MachineInstr &LdSt,
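SIInstrInfo reads the address and offset out of AMDGPU memory ops by operand name rather than by fixed index, as the DS case above shows. A sketch assuming the AMDGPU backend context; dsOffset is a hypothetical helper, and "SIInstrInfo.h" is the target-private header that declares getNamedOperand:

    #include "SIInstrInfo.h" // AMDGPU target-private header (assumption)

    using namespace llvm;

    static int64_t dsOffset(const SIInstrInfo &TII, const MachineInstr &LdSt) {
      // Look the operand up by name instead of by position.
      const MachineOperand *OffsetOp =
          TII.getNamedOperand(LdSt, AMDGPU::OpName::offset);
      return OffsetOp ? OffsetOp->getImm() : 0;
    }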
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/
AArch64PostLegalizerCombiner.cpp 715 if (auto *LdSt = dyn_cast<GLoadStore>(&MI); in optimizeConsecutiveMemOpAddressing() local
716 LdSt && MRI.getType(LdSt->getOperand(0).getReg()).isScalableVector()) in optimizeConsecutiveMemOpAddressing()
AArch64InstructionSelector.cpp 2874 GLoadStore &LdSt = cast<GLoadStore>(I); in select() local
2876 LLT PtrTy = MRI.getType(LdSt.getPointerReg()); in select()
2884 uint64_t MemSizeInBytes = LdSt.getMemSize().getValue(); in select()
2885 unsigned MemSizeInBits = LdSt.getMemSizeInBits().getValue(); in select()
2886 AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering(); in select()
2892 assert(!isa<GZExtLoad>(LdSt)); in select()
2896 if (isa<GLoad>(LdSt)) { in select()
2909 Register ValReg = LdSt.getReg(0); in select()
2924 const Register PtrReg = LdSt.getPointerReg(); in select()
2933 const Register ValReg = LdSt.getReg(0); in select()
[all …]
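The AArch64 selector consults the MachineMemOperand for access size and atomic ordering before picking an encoding. A sketch of that query; isUnorderedAccess is illustrative, and the monotonic cutoff mirrors a typical selector check rather than AArch64's exact policy:

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/Support/AtomicOrdering.h"

    using namespace llvm;

    static bool isUnorderedAccess(GLoadStore &LdSt) {
      AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
      uint64_t Bytes = LdSt.getMemSize().getValue(); // fixed-size access
      (void)Bytes; // a selector would pick an opcode per width
      // At or below monotonic, a plain load/store encoding suffices.
      return !isStrongerThanMonotonic(Order);
    }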
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/
RISCVInstrInfo.cpp 2618 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, in getMemOperandsWithOffsetWidth()
2621 if (!LdSt.mayLoadOrStore()) in getMemOperandsWithOffsetWidth()
2625 switch (LdSt.getOpcode()) { in getMemOperandsWithOffsetWidth()
2649 if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
2723 const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset, in getMemOperandWithOffsetWidth()
2725 if (!LdSt.mayLoadOrStore()) in getMemOperandWithOffsetWidth()
2731 if (LdSt.getNumExplicitOperands() != 3) in getMemOperandWithOffsetWidth()
2733 if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) || in getMemOperandWithOffsetWidth()
2734 !LdSt.getOperand(2).isImm()) in getMemOperandWithOffsetWidth()
2611 getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const in getMemOperandsWithOffsetWidth() argument
2716 getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const in getMemOperandWithOffsetWidth() argument
[all …]
RISCVInstrInfo.h 181 bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/
AArch64InstrInfo.cpp 2700 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, in getMemOperandsWithOffsetWidth() argument
2703 if (!LdSt.mayLoadOrStore()) in getMemOperandsWithOffsetWidth()
2708 if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, OffsetIsScalable, in getMemOperandsWithOffsetWidth()
3491 const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, in getMemOperandWithOffsetWidth() argument
3494 assert(LdSt.mayLoadOrStore() && "Expected a memory operation."); in getMemOperandWithOffsetWidth()
3496 if (LdSt.getNumExplicitOperands() == 3) { in getMemOperandWithOffsetWidth()
3498 if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) || in getMemOperandWithOffsetWidth()
3499 !LdSt.getOperand(2).isImm()) in getMemOperandWithOffsetWidth()
3501 } else if (LdSt.getNumExplicitOperands() == 4) { in getMemOperandWithOffsetWidth()
3503 if (!LdSt.getOperand(1).isReg() || in getMemOperandWithOffsetWidth()
[all …]
AArch64InstrInfo.h 315 MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;
AArch64SchedTSV110.td 135 // MicroOp Count/Types: #(ALU|AB|MDU|FSU1|FSU2|LdSt|ALUAB|F|FLdSt)
138 // 1 micro-ops to be issued down one ALU pipe, six MDU pipes and four LdSt pipes.
/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/
HexagonInstrInfo.h 209 const MachineInstr &LdSt,
HexagonInstrInfo.cpp 3073 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, in getMemOperandsWithOffsetWidth() argument
3077 const MachineOperand *BaseOp = getBaseAndOffset(LdSt, Offset, Width); in getMemOperandsWithOffsetWidth()
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/
X86InstrInfo.h 398 const MachineInstr &LdSt,
/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/
SystemZISelDAGToDAG.cpp 1526 auto *LdSt = dyn_cast<LSBaseSDNode>(MemAccess); in storeLoadIsAligned() local
1535 (LdSt && !LdSt->getOffset().isUndef())) in storeLoadIsAligned()
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/
CombinerHelper.h 891 bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/
ARMScheduleR52.td 551 foreach Num = 1-32 in { // reserve LdSt resource, no dual-issue
ARMISelDAGToDAG.cpp 1093 LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op); in SelectAddrMode6Offset() local
1094 ISD::MemIndexedMode AM = LdSt->getAddressingMode(); in SelectAddrMode6Offset()
1099 if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits()) in SelectAddrMode6Offset()
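On the SelectionDAG side, the ARM hit checks whether a constant pointer increment matches the access width so a post-indexed form can be selected. A sketch of that test; isPostIndexedFullWidth is a hypothetical name, and the width comparison follows line 1099 above:

    #include "llvm/CodeGen/SelectionDAGNodes.h"

    using namespace llvm;

    static bool isPostIndexedFullWidth(SDNode *Op, uint64_t IncrementBytes) {
      auto *LdSt = cast<LSBaseSDNode>(Op); // common base of load/store nodes
      ISD::MemIndexedMode AM = LdSt->getAddressingMode();
      if (AM != ISD::POST_INC)
        return false;
      // The increment folds only when it equals the access size in bits.
      return IncrementBytes * 8 == LdSt->getMemoryVT().getSizeInBits();
    }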