Searched refs: LoadMI (Results 1 – 19 of 19), sorted by relevance

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
AMDGPUPostLegalizerCombiner.cpp
384 MachineInstr *LoadMI = MRI.getVRegDef(LoadReg); in matchCombineSignExtendInReg() local
386 switch (LoadMI->getOpcode()) { in matchCombineSignExtendInReg()
388 MatchData = {LoadMI, AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE}; in matchCombineSignExtendInReg()
391 MatchData = {LoadMI, AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT}; in matchCombineSignExtendInReg()
394 MatchData = {LoadMI, AMDGPU::G_AMDGPU_S_BUFFER_LOAD_SBYTE}; in matchCombineSignExtendInReg()
397 MatchData = {LoadMI, AMDGPU::G_AMDGPU_S_BUFFER_LOAD_SSHORT}; in matchCombineSignExtendInReg()
407 auto [LoadMI, NewOpcode] = MatchData; in applyCombineSignExtendInReg()
408 LoadMI->setDesc(TII.get(NewOpcode)); in applyCombineSignExtendInReg()
412 LoadMI->getOperand(0).setReg(SignExtendInsnDst); in applyCombineSignExtendInReg()
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/
TargetInstrInfo.cpp
806 MachineInstr &LoadMI, in foldMemoryOperand() argument
808 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!"); in foldMemoryOperand()
824 isLoadFromStackSlot(LoadMI, FrameIndex)) { in foldMemoryOperand()
829 } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) { in foldMemoryOperand()
833 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS); in foldMemoryOperand()
841 NewMI->setMemRefs(MF, LoadMI.memoperands()); in foldMemoryOperand()
845 for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(), in foldMemoryOperand()
846 E = LoadMI.memoperands_end(); in foldMemoryOperand()
InlineSpiller.cpp
219 MachineInstr *LoadMI = nullptr);
909 MachineInstr *LoadMI) { in foldMemoryOperand() argument
957 if (LoadMI && MO.isDef()) in foldMemoryOperand()
988 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS) in foldMemoryOperand()
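Note: the TargetInstrInfo.cpp and InlineSpiller.cpp hits above are the generic folding path: the spiller hands a load to TII.foldMemoryOperand(), which asserts LoadMI.canFoldAsLoad() and defers to the target's foldMemoryOperandImpl(). A minimal sketch of that call pattern follows; it is not taken from these files, and the helper name tryFoldLoadIntoUser is made up.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Hypothetical helper: try to fold LoadMI into operands FoldOps of UseMI.
static MachineInstr *tryFoldLoadIntoUser(const TargetInstrInfo &TII,
                                         MachineInstr &UseMI,
                                         ArrayRef<unsigned> FoldOps,
                                         MachineInstr &LoadMI,
                                         LiveIntervals *LIS) {
  // foldMemoryOperand() asserts this, so bail out early instead of tripping it.
  if (!LoadMI.canFoldAsLoad())
    return nullptr;
  // Returns the new folded instruction on success, or nullptr if the target
  // declined the fold in its foldMemoryOperandImpl() override.
  return TII.foldMemoryOperand(UseMI, FoldOps, LoadMI, LIS);
}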
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/
CombinerHelper.cpp
659 PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI, in ChoosePreferredUse() argument
689 if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) { in ChoosePreferredUse()
778 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI); in matchCombineExtendingLoads() local
779 if (!LoadMI) in matchCombineExtendingLoads()
782 Register LoadReg = LoadMI->getDstReg(); in matchCombineExtendingLoads()
815 const auto &MMO = LoadMI->getMMO(); in matchCombineExtendingLoads()
824 LLT SrcTy = MRI.getType(LoadMI->getPointerReg()); in matchCombineExtendingLoads()
980 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg)); in matchCombineLoadWithAndMask() local
981 if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg())) in matchCombineLoadWithAndMask()
984 Register LoadReg = LoadMI->getDstReg(); in matchCombineLoadWithAndMask()
[all …]
LegalizerHelper.cpp
1619 auto &LoadMI = cast<GLoad>(MI); in narrowScalar() local
1620 Register DstReg = LoadMI.getDstReg(); in narrowScalar()
1625 if (8 * LoadMI.getMemSize().getValue() != DstTy.getSizeInBits()) { in narrowScalar()
1627 MIRBuilder.buildLoad(TmpReg, LoadMI.getPointerReg(), LoadMI.getMMO()); in narrowScalar()
1629 LoadMI.eraseFromParent(); in narrowScalar()
1633 return reduceLoadStoreWidth(LoadMI, TypeIdx, NarrowTy); in narrowScalar()
1637 auto &LoadMI = cast<GExtLoad>(MI); in narrowScalar() local
1638 Register DstReg = LoadMI.getDstReg(); in narrowScalar()
1639 Register PtrReg = LoadMI.getPointerReg(); in narrowScalar()
1642 auto &MMO = LoadMI.getMMO(); in narrowScalar()
[all …]
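Note: the GlobalISel hits above (CombinerHelper.cpp, LegalizerHelper.cpp) reach loads through the generic-instruction wrappers GAnyLoad/GLoad/GExtLoad and their getDstReg()/getPointerReg()/getMMO() accessors rather than raw operand indices. A small sketch of that wrapper-based pattern follows; it is assumed, not from these files, and the helper name isVolatileGISelLoad is made up.

#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Hypothetical helper: is SrcReg defined by a single-use generic load whose
// memory operand is marked volatile? Mirrors the dyn_cast<GAnyLoad> plus
// getMMO() pattern seen in the hits above.
static bool isVolatileGISelLoad(Register SrcReg, MachineRegisterInfo &MRI) {
  auto *LoadMI = dyn_cast_or_null<GAnyLoad>(MRI.getVRegDef(SrcReg));
  if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
    return false;
  return LoadMI->getMMO().isVolatile();
}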
/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/
MipsInstrInfo.h
105 const MachineInstr &LoadMI) const;
MipsInstrInfo.cpp
633 const MachineInstr &LoadMI) const { in SafeInLoadDelaySlot()
637 return !llvm::any_of(LoadMI.defs(), [&](const MachineOperand &Op) { in SafeInLoadDelaySlot()
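Note: the MipsInstrInfo hits above scan LoadMI.defs() with llvm::any_of to decide whether an instruction is safe in a load delay slot. A generic sketch of that defs()-scan follows; it is assumed rather than a copy of the Mips logic, and readsAnyDefOf is a made-up name.

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Hypothetical helper: does MI read any register that LoadMI defines?
static bool readsAnyDefOf(const MachineInstr &MI, const MachineInstr &LoadMI) {
  return llvm::any_of(LoadMI.defs(), [&](const MachineOperand &Op) {
    return Op.isReg() && Op.getReg().isValid() &&
           MI.readsRegister(Op.getReg(), /*TRI=*/nullptr);
  });
}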
/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/
SystemZInstrInfo.cpp
1555 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, in foldMemoryOperandImpl() argument
1586 if (!RegMemOpcode || LoadMI.getOpcode() != LoadOpc) in foldMemoryOperandImpl()
1591 assert(LoadMI.getParent() == MI.getParent() && "Assuming a local fold."); in foldMemoryOperandImpl()
1592 assert(LoadMI != InsertPt && "Assuming InsertPt not to be first in MBB."); in foldMemoryOperandImpl()
1608 Register FoldAsLoadDefReg = LoadMI.getOperand(0).getReg(); in foldMemoryOperandImpl()
1619 MachineOperand &Base = LoadMI.getOperand(1); in foldMemoryOperandImpl()
1620 MachineOperand &Disp = LoadMI.getOperand(2); in foldMemoryOperandImpl()
1621 MachineOperand &Indx = LoadMI.getOperand(3); in foldMemoryOperandImpl()
SystemZInstrInfo.h
307 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/
AArch64LoadStoreOptimizer.cpp
1566 MachineInstr &LoadMI = *I; in findMatchingStore() local
1567 Register BaseReg = AArch64InstrInfo::getLdStBaseOp(LoadMI).getReg(); in findMatchingStore()
1595 if (MI.mayStore() && isMatchingStore(LoadMI, MI) && in findMatchingStore()
1598 isLdOffsetInRangeOfSt(LoadMI, MI, TII) && in findMatchingStore()
1616 if (MI.mayStore() && LoadMI.mayAlias(AA, MI, /*UseTBAA*/ false)) in findMatchingStore()
AArch64FastISel.cpp
4534 const auto *LoadMI = MI; in optimizeIntExtLoad() local
4535 if (LoadMI->getOpcode() == TargetOpcode::COPY && in optimizeIntExtLoad()
4536 LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) { in optimizeIntExtLoad()
4538 LoadMI = MRI.getUniqueVRegDef(LoadReg); in optimizeIntExtLoad()
4539 assert(LoadMI && "Expected valid instruction"); in optimizeIntExtLoad()
4541 if (!(IsZExt && isZExtLoad(LoadMI)) && !(!IsZExt && isSExtLoad(LoadMI))) in optimizeIntExtLoad()
AArch64InstrInfo.cpp
6313 MachineInstr &LoadMI = *--InsertPt; in foldMemoryOperandImpl() local
6314 MachineOperand &LoadDst = LoadMI.getOperand(0); in foldMemoryOperandImpl()
6318 return &LoadMI; in foldMemoryOperandImpl()
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/
TargetInstrInfo.h
1236 MachineInstr &LoadMI,
1430 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/
X86InstrInfo.cpp
7651 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, in isNonFoldablePartialRegisterLoad() argument
7654 unsigned Opc = LoadMI.getOpcode(); in isNonFoldablePartialRegisterLoad()
7658 MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg()); in isNonFoldablePartialRegisterLoad()
8075 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, in foldMemoryOperandImpl() argument
8086 unsigned NumOps = LoadMI.getDesc().getNumOperands(); in foldMemoryOperandImpl()
8088 if (isLoadFromStackSlot(LoadMI, FrameIndex)) { in foldMemoryOperandImpl()
8089 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) in foldMemoryOperandImpl()
8108 if (!X86EnableAPXForRelocation && isMemInstrWithGOTPCREL(LoadMI) && in foldMemoryOperandImpl()
8114 unsigned LoadOpc = LoadMI.getOpcode(); in foldMemoryOperandImpl()
8115 if (LoadMI.hasOneMemOperand()) in foldMemoryOperandImpl()
[all …]
X86InstrInfo.h
509 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
X86FastISel.cpp
791 MachineInstrBuilder LoadMI = in handleConstantAddresses() local
793 addFullAddress(LoadMI, StubAM); in handleConstantAddresses()
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/
ARMInstructionSelector.cpp
1106 if (auto *LoadMI = dyn_cast<GLoad>(&I)) { in select() local
1107 Register PtrReg = LoadMI->getPointerReg(); in select()
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/
PPCMIPeephole.cpp
663 MachineInstr *LoadMI = MRI->getVRegDef(FeedReg1); in simplifyCode() local
664 if (LoadMI && LoadMI->getOpcode() == PPC::LXVDSX) in simplifyCode()
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/
AArch64InstructionSelector.cpp
2800 auto *LoadMI = emitLoadFromConstantPool(FPImm, MIB); in select() local
2801 if (!LoadMI) { in select()
2805 MIB.buildCopy({DefReg}, {LoadMI->getOperand(0).getReg()}); in select()
3438 auto *LoadMI = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI); in select() local
3441 if (LoadMI && IsGPR) { in select()
3442 const MachineMemOperand *MemOp = *LoadMI->memoperands_begin(); in select()
4318 MachineInstr *LoadMI = nullptr; in emitLoadFromConstantPool() local
4323 LoadMI = &*MIRBuilder.buildInstr(Opc, {RC}, {}).addConstantPoolIndex(CPIdx); in emitLoadFromConstantPool()
4329 LoadMI = &*MIRBuilder.buildInstr(Opc, {RC}, {Adrp}) in emitLoadFromConstantPool()
4337 LoadMI->addMemOperand(MF, MF.getMachineMemOperand(PtrInfo, in emitLoadFromConstantPool()
[all …]