/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

AMDGPUPostLegalizerCombiner.cpp
  384   MachineInstr *LoadMI = MRI.getVRegDef(LoadReg);              // in matchCombineSignExtendInReg() [local]
  386   switch (LoadMI->getOpcode()) {                               // in matchCombineSignExtendInReg()
  388   MatchData = {LoadMI, AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE};    // in matchCombineSignExtendInReg()
  391   MatchData = {LoadMI, AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT};   // in matchCombineSignExtendInReg()
  394   MatchData = {LoadMI, AMDGPU::G_AMDGPU_S_BUFFER_LOAD_SBYTE};  // in matchCombineSignExtendInReg()
  397   MatchData = {LoadMI, AMDGPU::G_AMDGPU_S_BUFFER_LOAD_SSHORT}; // in matchCombineSignExtendInReg()
  407   auto [LoadMI, NewOpcode] = MatchData;                        // in applyCombineSignExtendInReg()
  408   LoadMI->setDesc(TII.get(NewOpcode));                         // in applyCombineSignExtendInReg()
  412   LoadMI->getOperand(0).setReg(SignExtendInsnDst);             // in applyCombineSignExtendInReg()
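The AMDGPU hits are the post-legalizer combine that folds a G_SEXT_INREG into the buffer load feeding it: the match step records the load and the sign-extending opcode it should become, and the apply step rewrites the load in place with setDesc() and retargets its result register. Below is a minimal, LLVM-free sketch of that match/apply split; the enum values, struct and function names are hypothetical stand-ins, not the real G_AMDGPU_* opcodes or combiner API, and the real match also checks that the sign-extension width matches the memory size.

    #include <cstdio>
    #include <optional>

    // Hypothetical stand-ins for the zero/sign-extending buffer-load opcodes;
    // these are NOT the real G_AMDGPU_* enumerators.
    enum Opcode {
      BUFFER_LOAD_UBYTE, BUFFER_LOAD_USHORT,   // zero-extending loads
      BUFFER_LOAD_SBYTE, BUFFER_LOAD_SSHORT,   // sign-extending loads
    };

    struct MatchData { int LoadId; Opcode NewOpcode; };

    // "Match" half: if a sign-extend-in-reg is fed by a zero-extending load,
    // remember the load and the signed opcode it should become.
    std::optional<MatchData> matchSignExtendInReg(int LoadId, Opcode LoadOpc) {
      switch (LoadOpc) {
      case BUFFER_LOAD_UBYTE:  return MatchData{LoadId, BUFFER_LOAD_SBYTE};
      case BUFFER_LOAD_USHORT: return MatchData{LoadId, BUFFER_LOAD_SSHORT};
      default:                 return std::nullopt;   // not a foldable load
      }
    }

    // "Apply" half: rewrite the load in place.  In the real pass this is
    // LoadMI->setDesc(TII.get(NewOpcode)) plus pointing the load's result
    // register at the G_SEXT_INREG's destination.
    void applySignExtendInReg(Opcode &LoadOpc, const MatchData &MD) {
      LoadOpc = MD.NewOpcode;
    }

    int main() {
      Opcode Load = BUFFER_LOAD_UBYTE;
      if (auto MD = matchSignExtendInReg(/*LoadId=*/0, Load))
        applySignExtendInReg(Load, *MD);
      std::printf("%s\n", Load == BUFFER_LOAD_SBYTE ? "folded to SBYTE" : "unchanged");
    }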
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/

TargetInstrInfo.cpp
  731   MachineInstr &LoadMI,                                                      // in foldMemoryOperand() [argument]
  733   assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");                // in foldMemoryOperand()
  749   isLoadFromStackSlot(LoadMI, FrameIndex)) {                                 // in foldMemoryOperand()
  754   } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {  // in foldMemoryOperand()
  758   NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);               // in foldMemoryOperand()
  766   NewMI->setMemRefs(MF, LoadMI.memoperands());                               // in foldMemoryOperand()
  770   for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),            // in foldMemoryOperand()
  771        E = LoadMI.memoperands_end();                                         // in foldMemoryOperand()

InlineSpiller.cpp
  221   MachineInstr *LoadMI = nullptr);
  890   MachineInstr *LoadMI) {                                                    // in foldMemoryOperand() [argument]
  938   if (LoadMI && MO.isDef())                                                  // in foldMemoryOperand()
  969   LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)                // in foldMemoryOperand()
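TargetInstrInfo::foldMemoryOperand is the target-independent driver, and InlineSpiller is its main in-tree caller: it tries to fold a reload (LoadMI) straight into the instruction that would have used the reloaded register. A rough sketch of the calling pattern is below; it only compiles inside LLVM's CodeGen, the helper name is made up, and the erase-on-success step is a simplification of the LiveIntervals bookkeeping InlineSpiller actually performs.

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/CodeGen/LiveIntervals.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // Illustrative only: drive the LoadMI overload of foldMemoryOperand.
    // FoldOps lists the operand indices of MI that refer to the register being
    // reloaded; on success the folded instruction has already been inserted,
    // with LoadMI's memory operands copied onto it (see setMemRefs above).
    static bool foldReloadIntoUse(MachineInstr &MI, ArrayRef<unsigned> FoldOps,
                                  MachineInstr &LoadMI,
                                  const TargetInstrInfo &TII, LiveIntervals &LIS) {
      MachineInstr *FoldMI = TII.foldMemoryOperand(MI, FoldOps, LoadMI, &LIS);
      if (!FoldMI)
        return false;          // target could not fold this load
      MI.eraseFromParent();    // original use is now redundant
      return true;
    }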
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/

CombinerHelper.cpp
  593   PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI,            // in ChoosePreferredUse() [argument]
  623   if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {  // in ChoosePreferredUse()
  712   GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);                        // in matchCombineExtendingLoads() [local]
  713   if (!LoadMI)                                                       // in matchCombineExtendingLoads()
  716   Register LoadReg = LoadMI->getDstReg();                            // in matchCombineExtendingLoads()
  749   const auto &MMO = LoadMI->getMMO();                                // in matchCombineExtendingLoads()
  758   LLT SrcTy = MRI.getType(LoadMI->getPointerReg());                  // in matchCombineExtendingLoads()
  915   GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));     // in matchCombineLoadWithAndMask() [local]
  916   if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))          // in matchCombineLoadWithAndMask()
  919   Register LoadReg = LoadMI->getDstReg();                            // in matchCombineLoadWithAndMask()
  [all …]

LegalizerHelper.cpp
  1356  auto &LoadMI = cast<GLoad>(MI);                                    // in narrowScalar() [local]
  1357  Register DstReg = LoadMI.getDstReg();                              // in narrowScalar()
  1362  if (8 * LoadMI.getMemSize().getValue() != DstTy.getSizeInBits()) { // in narrowScalar()
  1364  MIRBuilder.buildLoad(TmpReg, LoadMI.getPointerReg(), LoadMI.getMMO()); // in narrowScalar()
  1366  LoadMI.eraseFromParent();                                          // in narrowScalar()
  1370  return reduceLoadStoreWidth(LoadMI, TypeIdx, NarrowTy);            // in narrowScalar()
  1374  auto &LoadMI = cast<GExtLoad>(MI);                                 // in narrowScalar() [local]
  1375  Register DstReg = LoadMI.getDstReg();                              // in narrowScalar()
  1376  Register PtrReg = LoadMI.getPointerReg();                          // in narrowScalar()
  1379  auto &MMO = LoadMI.getMMO();                                       // in narrowScalar()
  [all …]
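Both GlobalISel files use the typed instruction wrappers (GAnyLoad, GLoad, GExtLoad, GZExtLoad) from GenericMachineInstrs.h instead of switching on raw opcodes, together with MachineRegisterInfo::hasOneNonDBGUse to make sure the loaded value is not consumed anywhere else. A compressed sketch of that recognition step, mirroring the matchCombineLoadWithAndMask hits above (the helper name is hypothetical and the fragment only compiles inside an LLVM GlobalISel pass):

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Illustrative: find the load feeding SrcReg and bail out unless the
    // loaded value has exactly one non-debug user, so rewriting the load
    // cannot affect any other instruction.
    static GAnyLoad *getSingleUseLoadDef(Register SrcReg,
                                         MachineRegisterInfo &MRI) {
      GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
      if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
        return nullptr;        // not a load, or value used elsewhere
      return LoadMI;
    }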
/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/

MipsInstrInfo.h
  101   const MachineInstr &LoadMI) const;

MipsInstrInfo.cpp
  617   const MachineInstr &LoadMI) const {                                  // in SafeInLoadDelaySlot()
  621   return !llvm::any_of(LoadMI.defs(), [&](const MachineOperand &Op) {  // in SafeInLoadDelaySlot()
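SafeInLoadDelaySlot decides whether an instruction may occupy a load's delay slot; on MIPS I the slot instruction must not read the register the load writes. The two hits show that the core of the predicate is llvm::any_of over LoadMI.defs(); the lambda body in the sketch below (readsRegister against each defined register) is my assumption about what the real check does, and the free-function form and TRI parameter are illustrative.

    #include "llvm/ADT/STLExtras.h"              // llvm::any_of
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"

    using namespace llvm;

    // Sketch: true if MIInSlot can safely sit in LoadMI's delay slot, i.e. it
    // reads none of the registers LoadMI defines.
    static bool safeInLoadDelaySlot(const MachineInstr &MIInSlot,
                                    const MachineInstr &LoadMI,
                                    const TargetRegisterInfo &TRI) {
      return !llvm::any_of(LoadMI.defs(), [&](const MachineOperand &Op) {
        return Op.isReg() && MIInSlot.readsRegister(Op.getReg(), &TRI);
      });
    }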
/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/

SystemZInstrInfo.cpp
  1522  MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,                 // in foldMemoryOperandImpl() [argument]
  1553  if (!RegMemOpcode || LoadMI.getOpcode() != LoadOpc)                         // in foldMemoryOperandImpl()
  1558  assert(LoadMI.getParent() == MI.getParent() && "Assuming a local fold.");   // in foldMemoryOperandImpl()
  1559  assert(LoadMI != InsertPt && "Assuming InsertPt not to be first in MBB.");  // in foldMemoryOperandImpl()
  1575  Register FoldAsLoadDefReg = LoadMI.getOperand(0).getReg();                  // in foldMemoryOperandImpl()
  1586  MachineOperand &Base = LoadMI.getOperand(1);                                // in foldMemoryOperandImpl()
  1587  MachineOperand &Disp = LoadMI.getOperand(2);                                // in foldMemoryOperandImpl()
  1588  MachineOperand &Indx = LoadMI.getOperand(3);                                // in foldMemoryOperandImpl()

SystemZInstrInfo.h
  307   MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64LoadStoreOptimizer.cpp
  1298  MachineInstr &LoadMI = *I;                                          // in findMatchingStore() [local]
  1299  Register BaseReg = AArch64InstrInfo::getLdStBaseOp(LoadMI).getReg(); // in findMatchingStore()
  1327  if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&                 // in findMatchingStore()
  1330  isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&                           // in findMatchingStore()
  1348  if (MI.mayStore() && LoadMI.mayAlias(AA, MI, /*UseTBAA*/ false))    // in findMatchingStore()

AArch64FastISel.cpp
  4531  const auto *LoadMI = MI;                                            // in optimizeIntExtLoad() [local]
  4532  if (LoadMI->getOpcode() == TargetOpcode::COPY &&                    // in optimizeIntExtLoad()
  4533  LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) {             // in optimizeIntExtLoad()
  4535  LoadMI = MRI.getUniqueVRegDef(LoadReg);                             // in optimizeIntExtLoad()
  4536  assert(LoadMI && "Expected valid instruction");                     // in optimizeIntExtLoad()
  4538  if (!(IsZExt && isZExtLoad(LoadMI)) && !(!IsZExt && isSExtLoad(LoadMI))) // in optimizeIntExtLoad()

AArch64InstrInfo.cpp
  5644  MachineInstr &LoadMI = *--InsertPt;                                 // in foldMemoryOperandImpl() [local]
  5645  MachineOperand &LoadDst = LoadMI.getOperand(0);                     // in foldMemoryOperandImpl()
  5649  return &LoadMI;                                                     // in foldMemoryOperandImpl()
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/

TargetInstrInfo.h
  1205  MachineInstr &LoadMI,
  1364  MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
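The two TargetInstrInfo.h hits are fragments of the declarations that the TargetInstrInfo.cpp entry implements and that SystemZ, AArch64 and X86 override. For orientation, the surrounding signatures look roughly like the following; argument lists differ slightly between LLVM releases, so treat this as a paraphrase rather than the exact header text.

    // Public driver: fold LoadMI into the operands of MI listed in Ops, if the
    // target can.  Returns the new instruction, or nullptr on failure.
    MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
                                    MachineInstr &LoadMI,
                                    LiveIntervals *LIS = nullptr) const;

    // Target hook overridden by SystemZ, AArch64, X86, ...: perform the actual
    // fold, inserting the result before InsertPt.
    virtual MachineInstr *
    foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                          ArrayRef<unsigned> Ops,
                          MachineBasicBlock::iterator InsertPt,
                          MachineInstr &LoadMI,
                          LiveIntervals *LIS = nullptr) const;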
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

X86InstrInfo.cpp
  7564  static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, // in isNonFoldablePartialRegisterLoad() [argument]
  7567  unsigned Opc = LoadMI.getOpcode();                                  // in isNonFoldablePartialRegisterLoad()
  7571  MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());         // in isNonFoldablePartialRegisterLoad()
  7988  MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,         // in foldMemoryOperandImpl() [argument]
  7999  unsigned NumOps = LoadMI.getDesc().getNumOperands();                // in foldMemoryOperandImpl()
  8001  if (isLoadFromStackSlot(LoadMI, FrameIndex)) {                      // in foldMemoryOperandImpl()
  8002  if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))               // in foldMemoryOperandImpl()
  8019  unsigned LoadOpc = LoadMI.getOpcode();                              // in foldMemoryOperandImpl()
  8020  if (LoadMI.hasOneMemOperand())                                      // in foldMemoryOperandImpl()
  8021  Alignment = (*LoadMI.memoperands_begin())->getAlign();              // in foldMemoryOperandImpl()
  [all …]

X86InstrInfo.h
  461   MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,

X86FastISel.cpp
  791   MachineInstrBuilder LoadMI =                                        // in handleConstantAddresses() [local]
  793   addFullAddress(LoadMI, StubAM);                                     // in handleConstantAddresses()
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/

PPCMIPeephole.cpp
  668   MachineInstr *LoadMI = MRI->getVRegDef(FeedReg1);                   // in simplifyCode() [local]
  669   if (LoadMI && LoadMI->getOpcode() == PPC::LXVDSX)                   // in simplifyCode()
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/

AArch64InstructionSelector.cpp
  2671  auto *LoadMI = emitLoadFromConstantPool(FPImm, MIB);                // in select() [local]
  2672  if (!LoadMI) {                                                      // in select()
  2676  MIB.buildCopy({DefReg}, {LoadMI->getOperand(0).getReg()});          // in select()
  3303  auto *LoadMI = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);     // in select() [local]
  3306  if (LoadMI && IsGPR) {                                              // in select()
  3307  const MachineMemOperand *MemOp = *LoadMI->memoperands_begin();      // in select()
  4183  MachineInstr *LoadMI = nullptr;                                     // in emitLoadFromConstantPool() [local]
  4188  LoadMI = &*MIRBuilder.buildInstr(Opc, {RC}, {}).addConstantPoolIndex(CPIdx); // in emitLoadFromConstantPool()
  4194  LoadMI = &*MIRBuilder.buildInstr(Opc, {RC}, {Adrp})                 // in emitLoadFromConstantPool()
  4202  LoadMI->addMemOperand(MF, MF.getMachineMemOperand(PtrInfo,          // in emitLoadFromConstantPool()
  [all …]