/freebsd/contrib/llvm-project/llvm/lib/Target/Lanai/

  LanaiSchedule.td
    55   def LdSt : ProcResource<1> { let BufferSize = 0; }
    64   def : WriteRes<WriteLD, [LdSt]> { let Latency = 2; }
    65   def : WriteRes<WriteST, [LdSt]> { let Latency = 2; }
    66   def : WriteRes<WriteLDSW, [LdSt]> { let Latency = 2; }
    67   def : WriteRes<WriteSTSW, [LdSt]> { let Latency = 4; }

  LanaiInstrInfo.cpp  (hits in getMemOperandWithOffsetWidth() and getMemOperandsWithOffsetWidth())
    755  const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
    759  if (LdSt.getNumOperands() != 4)
    761  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm() ||
    762  !(LdSt.getOperand(3).isImm() && LdSt.getOperand(3).getImm() == LPAC::ADD))
    765  switch (LdSt.getOpcode()) {
    786  BaseOp = &LdSt.getOperand(1);
    787  Offset = LdSt.getOperand(2).getImm();
    796  const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    799  switch (LdSt.getOpcode()) {
    813  if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
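
These Lanai hits show the usual shape of the getMemOperandWithOffsetWidth hook: verify that the instruction is a simple reg-plus-immediate access, then report its base operand and offset. The sketch below is a generic paraphrase of that pattern, not the Lanai code; the function name is invented and the opcode/width handling is only summarised in a comment.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineOperand.h"

    using namespace llvm;

    // Generic paraphrase of the reg+imm pattern in the Lanai hits above.
    static bool getRegImmMemOperand(const MachineInstr &LdSt,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset) {
      // Only plain loads/stores of the form  <def/use>, <base reg>, <imm>.
      if (!LdSt.mayLoadOrStore() || LdSt.getNumExplicitOperands() != 3)
        return false;
      if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
        return false;
      // A real hook would also switch on LdSt.getOpcode() here to reject
      // addressing forms it does not understand and to compute the access
      // width, as the Lanai and RISC-V versions do.
      BaseOp = &LdSt.getOperand(1);
      Offset = LdSt.getOperand(2).getImm();
      return true;
    }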

  LanaiInstrInfo.h
    71   const MachineInstr &LdSt,
    76   bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,

/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/

  PPCInstrInfo.cpp  (hits in getMemOperandsWithOffsetWidth(), isLdStSafeToCluster() and getMemOperandWithOffsetWidth())
    2838  const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    2843  if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
    2849  static bool isLdStSafeToCluster(const MachineInstr &LdSt,
    2852  if (LdSt.hasOrderedMemoryRef() || LdSt.getNumExplicitOperands() != 3)
    2855  if (LdSt.getOperand(2).isFI())
    2858  assert(LdSt.getOperand(2).isReg() && "Expected a reg operand.");
    2861  if (LdSt.modifiesRegister(LdSt.getOperand(2).getReg(), TRI))
    5528  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
    5530  if (!LdSt.mayLoadOrStore() || LdSt.getNumExplicitOperands() != 3)
    5534  if (!LdSt.getOperand(1).isImm() ||
    [all …]
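
The isLdStSafeToCluster() hits show the structural checks PPC applies before treating a load/store as clusterable. Below is a hedged paraphrase of those checks as a free function; the name isSimpleClusterableMemOp() is made up for illustration and the function is not part of the PPC API.

    #include <cassert>
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"

    using namespace llvm;

    // Paraphrase of a clustering-safety gate in the spirit of PPC's
    // isLdStSafeToCluster(); not the verbatim implementation.
    static bool isSimpleClusterableMemOp(const MachineInstr &LdSt,
                                         const TargetRegisterInfo *TRI) {
      // Ordered (volatile/atomic) accesses and unusual operand counts are
      // never clustered.
      if (LdSt.hasOrderedMemoryRef() || LdSt.getNumExplicitOperands() != 3)
        return false;
      // Frame-index bases are resolved later in frame lowering; skip them.
      if (LdSt.getOperand(2).isFI())
        return false;
      assert(LdSt.getOperand(2).isReg() && "Expected a reg operand.");
      // A load/store that also writes its own base register (a pre/post
      // increment form) cannot safely be moved next to a neighbour.
      if (LdSt.modifiesRegister(LdSt.getOperand(2).getReg(), TRI))
        return false;
      return true;
    }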

  PPCInstrInfo.h
    558  bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
    568  const MachineInstr &LdSt,

/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/

  PPCInstructionSelector.cpp  (hits in select())
    730  GLoadStore &LdSt = cast<GLoadStore>(I);
    731  LLT PtrTy = MRI.getType(LdSt.getPointerReg());
    741  I.getOpcode(), RBI.getRegBank(LdSt.getReg(0), MRI, TRI)->getID(),
    742  LdSt.getMemSizeInBits().getValue());
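
The select() hits above view the instruction as a GLoadStore and pull the pointer type and access size off it (the AArch64 selector further down does the same). A minimal sketch of those queries, assuming the invented helper name hasSelectableMemSize(); the opcode/register-bank mapping the real selector performs afterwards is elided.

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Sketch of the initial GLoadStore queries made by the selectors above.
    static bool hasSelectableMemSize(MachineInstr &I, MachineRegisterInfo &MRI) {
      GLoadStore &LdSt = cast<GLoadStore>(I);          // generic load or store
      LLT PtrTy = MRI.getType(LdSt.getPointerReg());   // address operand type
      if (!PtrTy.isPointer())
        return false;                                  // only plain pointers
      // Access size comes from the attached memory operand (MMO).
      uint64_t MemSizeInBits = LdSt.getMemSizeInBits().getValue();
      return MemSizeInBits == 8 || MemSizeInBits == 16 ||
             MemSizeInBits == 32 || MemSizeInBits == 64;
    }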

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/

  CombinerHelper.cpp  (hits in isIndexedLoadStoreLegal() and findPostIndexCandidate())
    1143  bool CombinerHelper::isIndexedLoadStoreLegal(GLoadStore &LdSt) const {
    1145  LLT PtrTy = MRI.getType(LdSt.getPointerReg());
    1146  LLT Ty = MRI.getType(LdSt.getReg(0));
    1147  LLT MemTy = LdSt.getMMO().getMemoryType();
    1151  unsigned IndexedOpc = getIndexedOpc(LdSt.getOpcode());
    1167  bool CombinerHelper::findPostIndexCandidate(GLoadStore &LdSt, Register &Addr,
    1177  Register Ptr = LdSt.getPointerReg();
    1182  if (!isIndexedLoadStoreLegal(LdSt))
    1188  MachineInstr *StoredValDef = getDefIgnoringCopies(LdSt.getReg(0), MRI);
    1209  !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(), Offset,
    [all …]
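
isIndexedLoadStoreLegal() consults the legalizer before the combine rewrites an access into a pre/post-indexed form. The sketch below only reproduces the type queries visible in the hits; the actual legality plumbing is elided, and the function name is invented for illustration.

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Type queries behind an indexed-form candidacy check; paraphrased,
    // not the CombinerHelper implementation.
    static bool isPlainIndexedCandidate(GLoadStore &LdSt,
                                        MachineRegisterInfo &MRI) {
      LLT PtrTy = MRI.getType(LdSt.getPointerReg());  // address register type
      LLT ValTy = MRI.getType(LdSt.getReg(0));        // value register type
      LLT MemTy = LdSt.getMMO().getMemoryType();      // type actually in memory
      // Extending loads / truncating stores have ValTy != MemTy; indexed
      // forms are usually only formed for "plain" accesses through a pointer.
      return PtrTy.isPointer() && ValTy == MemTy;
    }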

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

  SIInstrInfo.cpp  (hits in getMemOperandsWithOffsetWidth())
    357  const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    360  if (!LdSt.mayLoadOrStore())
    363  unsigned Opc = LdSt.getOpcode();
    368  if (isDS(LdSt)) {
    369  BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    370  OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    384  Width = getOpSize(LdSt, DataOpIdx);
    390  getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    392  getNamedOperand(LdSt, AMDGPU::OpName::offset1);
    403  if (LdSt.mayLoad())
    [all …]
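
Rather than hard-coding operand positions, SIInstrInfo resolves the base and offset operands by name (AMDGPU::OpName::addr, ::offset, or the offset0/offset1 pair for dual-offset DS forms). A simplified sketch of the single-offset DS path is below; the helper name is invented and error handling is reduced to early returns.

    #include "SIInstrInfo.h"   // AMDGPU target header, build-tree relative
    #include "llvm/ADT/SmallVector.h"

    using namespace llvm;

    // Simplified sketch of the DS path in getMemOperandsWithOffsetWidth();
    // the real function also handles flat, MUBUF and SMEM forms.
    static bool getDSBaseAndOffset(const SIInstrInfo &TII,
                                   const MachineInstr &LdSt,
                                   SmallVectorImpl<const MachineOperand *> &BaseOps,
                                   int64_t &Offset) {
      if (!LdSt.mayLoadOrStore())
        return false;
      const MachineOperand *BaseOp =
          TII.getNamedOperand(LdSt, AMDGPU::OpName::addr);    // base address
      const MachineOperand *OffsetOp =
          TII.getNamedOperand(LdSt, AMDGPU::OpName::offset);  // imm offset
      if (!BaseOp || !OffsetOp)
        return false;                     // dual-offset DS forms not handled
      BaseOps.push_back(BaseOp);
      Offset = OffsetOp->getImm();
      return true;
    }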

  SIInstrInfo.h
    241  const MachineInstr &LdSt,

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/

  AArch64PostLegalizerCombiner.cpp  (hits in optimizeConsecutiveMemOpAddressing())
    715  if (auto *LdSt = dyn_cast<GLoadStore>(&MI);
    716  LdSt && MRI.getType(LdSt->getOperand(0).getReg()).isScalableVector())

  AArch64InstructionSelector.cpp  (hits in select())
    2874  GLoadStore &LdSt = cast<GLoadStore>(I);
    2876  LLT PtrTy = MRI.getType(LdSt.getPointerReg());
    2884  uint64_t MemSizeInBytes = LdSt.getMemSize().getValue();
    2885  unsigned MemSizeInBits = LdSt.getMemSizeInBits().getValue();
    2886  AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
    2892  assert(!isa<GZExtLoad>(LdSt));
    2896  if (isa<GLoad>(LdSt)) {
    2909  Register ValReg = LdSt.getReg(0);
    2924  const Register PtrReg = LdSt.getPointerReg();
    2933  const Register ValReg = LdSt.getReg(0);
    [all …]
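
Beyond the access size, the AArch64 selector also reads the atomic ordering off the MMO (line 2886) so that ordered accesses can be routed to acquire/release instructions instead of plain loads and stores. A small hedged helper showing that query; the name is illustrative, not an AArch64 API.

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/Support/AtomicOrdering.h"

    using namespace llvm;

    // Does this generic load/store carry an ordering constraint?
    static bool needsOrderedSelection(const GLoadStore &LdSt) {
      AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
      return Order != AtomicOrdering::NotAtomic &&
             Order != AtomicOrdering::Unordered;
    }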

/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/

  RISCVInstrInfo.cpp  (hits in getMemOperandsWithOffsetWidth() and getMemOperandWithOffsetWidth())
    2618  const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    2621  if (!LdSt.mayLoadOrStore())
    2625  switch (LdSt.getOpcode()) {
    2649  if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
    2723  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
    2725  if (!LdSt.mayLoadOrStore())
    2731  if (LdSt.getNumExplicitOperands() != 3)
    2733  if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
    2734  !LdSt
    declarations of the LdSt parameter:
    2611  getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const
    2716  getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
    [all …]

  RISCVInstrInfo.h
    181  bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

  AArch64InstrInfo.cpp  (hits in getMemOperandsWithOffsetWidth() and getMemOperandWithOffsetWidth())
    2700  const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    2703  if (!LdSt.mayLoadOrStore())
    2708  if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, OffsetIsScalable,
    3491  const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
    3494  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
    3496  if (LdSt.getNumExplicitOperands() == 3) {
    3498  if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
    3499  !LdSt.getOperand(2).isImm())
    3501  } else if (LdSt.getNumExplicitOperands() == 4) {
    3503  if (!LdSt.getOperand(1).isReg() ||
    [all …]
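
The 3-operand versus 4-operand split at lines 3496/3501 distinguishes single loads/stores (value, base, immediate) from paired LDP/STP-style instructions that carry two value operands. The paraphrase below is a hedged reading of that shape check; the operand roles are inferred from these hits and the function name is invented, so treat it as a sketch rather than the AArch64 implementation.

    #include <cassert>
    #include "llvm/CodeGen/MachineInstr.h"

    using namespace llvm;

    // Paraphrased operand-shape check: single accesses have three explicit
    // operands, paired accesses four.  Not the verbatim AArch64 code.
    static bool hasRecognizedMemShape(const MachineInstr &LdSt) {
      assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
      if (LdSt.getNumExplicitOperands() == 3) {
        // value, base (register or frame index), immediate offset
        return (LdSt.getOperand(1).isReg() || LdSt.getOperand(1).isFI()) &&
               LdSt.getOperand(2).isImm();
      }
      if (LdSt.getNumExplicitOperands() == 4) {
        // paired form: value, value, base, immediate offset
        return LdSt.getOperand(1).isReg() &&
               (LdSt.getOperand(2).isReg() || LdSt.getOperand(2).isFI()) &&
               LdSt.getOperand(3).isImm();
      }
      return false;
    }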

  AArch64InstrInfo.h
    315  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  AArch64SchedTSV110.td
    135  // MicroOp Count/Types: #(ALU|AB|MDU|FSU1|FSU2|LdSt|ALUAB|F|FLdSt)
    138  // 1 micro-ops to be issued down one ALU pipe, six MDU pipes and four LdSt pipes.

/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/

  HexagonInstrInfo.h
    209  const MachineInstr &LdSt,

  HexagonInstrInfo.cpp  (hits in getMemOperandsWithOffsetWidth())
    3073  const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    3077  const MachineOperand *BaseOp = getBaseAndOffset(LdSt, Offset, Width);

/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

  X86InstrInfo.h
    398  const MachineInstr &LdSt,

/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/

  SystemZISelDAGToDAG.cpp  (hits in storeLoadIsAligned())
    1526  auto *LdSt = dyn_cast<LSBaseSDNode>(MemAccess);
    1535  (LdSt && !LdSt->getOffset().isUndef())
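
At the SelectionDAG level the same distinction appears through LSBaseSDNode: a load/store whose offset operand is not undef is a pre/post-indexed form, which the quoted condition filters out. A small sketch of that test; the wrapper name is invented and this is not the SystemZ code.

    #include "llvm/CodeGen/SelectionDAGNodes.h"

    using namespace llvm;

    // True when the memory node is either not a plain load/store or is an
    // un-indexed one; a defined offset operand marks an indexed access.
    static bool isUnindexedOrNotLoadStore(SDNode *MemAccess) {
      auto *LdSt = dyn_cast<LSBaseSDNode>(MemAccess);
      if (!LdSt)
        return true;                      // e.g. atomic nodes fall through
      return LdSt->getOffset().isUndef(); // undef offset => simple form
    }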

/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/

  CombinerHelper.h
    891  bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;

/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/

  ARMScheduleR52.td
    551  foreach Num = 1-32 in { // reserve LdSt resource, no dual-issue

  ARMISelDAGToDAG.cpp  (hits in SelectAddrMode6Offset())
    1093  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
    1094  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
    1099  if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())