/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

SIFoldOperands.cpp
     25: MachineInstr *UseMI;  [member]
     39: UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),  [in FoldCandidate()]
     78: bool frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
     95: bool tryToFoldACImm(const MachineOperand &OpToFold, MachineInstr *UseMI,
     99: MachineInstr *UseMI,
    180: bool SIFoldOperands::frameIndexMayFold(const MachineInstr &UseMI, int OpNo,  [in frameIndexMayFold(), argument]
    185: const unsigned Opc = UseMI.getOpcode();  [in frameIndexMayFold()]
    186: if (TII->isMUBUF(UseMI))  [in frameIndexMayFold()]
    188: if (!TII->isFLATScratch(UseMI))  [in frameIndexMayFold()]
    204: MachineInstr *MI = Fold.UseMI;  [in canUseImmWithOpSel()]
    [all …]

SIOptimizeVGPRLiveRange.cpp
    214: for (auto &UseMI : MRI->use_nodbg_instructions(Reg)) {  [in findNonPHIUsesInBlock(), local]
    215: if (UseMI.getParent() == MBB && !UseMI.isPHI())  [in findNonPHIUsesInBlock()]
    216: Uses.push_back(&UseMI);  [in findNonPHIUsesInBlock()]
    305: auto *UseMI = I->getParent();  [in collectCandidateRegisters(), local]
    306: auto *UseMBB = UseMI->getParent();  [in collectCandidateRegisters()]
    308: if (!UseMI->isPHI())  [in collectCandidateRegisters()]
    311: auto *IncomingMBB = UseMI->getOperand(I.getOperandNo() + 1).getMBB();  [in collectCandidateRegisters()]
    430: auto *UseMI = I->getParent();  [in updateLiveRangeInThenRegion(), local]
    431: if (UseMI->isPHI() && I->readsReg()) {  [in updateLiveRangeInThenRegion()]
    432: if (Blocks.contains(UseMI->getParent()))  [in updateLiveRangeInThenRegion()]
    [all …]

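Most of the hits above share one MachineRegisterInfo idiom: iterate the non-debug users of a virtual register and filter by block and PHI-ness. A minimal self-contained sketch of that idiom follows; the function name and parameters are illustrative, not taken from the pass.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Collect the non-PHI users of Reg that live in MBB, skipping debug
    // instructions -- the same shape as findNonPHIUsesInBlock() above.
    static void collectNonPHIUsersInBlock(Register Reg,
                                          const MachineBasicBlock *MBB,
                                          const MachineRegisterInfo &MRI,
                                          SmallVectorImpl<MachineInstr *> &Uses) {
      for (MachineInstr &UseMI : MRI.use_nodbg_instructions(Reg))
        if (UseMI.getParent() == MBB && !UseMI.isPHI())
          Uses.push_back(&UseMI);
    }
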
SIFixSGPRCopies.cpp
    234: const auto *UseMI = MO.getParent();  [in tryChangeVGPRtoSGPRinCopy(), local]
    235: if (UseMI == &MI)  [in tryChangeVGPRtoSGPRinCopy()]
    237: if (MO.isDef() || UseMI->getParent() != MI.getParent() ||  [in tryChangeVGPRtoSGPRinCopy()]
    238: UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)  [in tryChangeVGPRtoSGPRinCopy()]
    242: if (OpIdx >= UseMI->getDesc().getNumOperands() ||  [in tryChangeVGPRtoSGPRinCopy()]
    243: !TII->isOperandLegal(*UseMI, OpIdx, &Src))  [in tryChangeVGPRtoSGPRinCopy()]
    804: const MachineInstr *UseMI = Use.getParent();  [in processPHINode(), local]
    805: AllAGPRUses &= (UseMI->isCopy() &&  [in processPHINode()]
    806: TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg())) ||  [in processPHINode()]
    808: if (UseMI->isCopy() || UseMI->isRegSequence()) {  [in processPHINode()]
    [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/

HexagonOptAddrMode.cpp
     94: bool xformUseMI(MachineInstr *TfrMI, MachineInstr *UseMI,
     98: bool updateAddUses(MachineInstr *AddMI, MachineInstr *UseMI);
    191: MachineInstr &UseMI = *NodeAddr<StmtNode *>(IA).Addr->getCode();  [in canRemoveAddasl(), local]
    195: MI.getParent() != UseMI.getParent())  [in canRemoveAddasl()]
    198: const MCInstrDesc &UseMID = UseMI.getDesc();  [in canRemoveAddasl()]
    200: HII->getAddrMode(UseMI) != HexagonII::BaseImmOffset ||  [in canRemoveAddasl()]
    201: getBaseWithLongOffset(UseMI) < 0)  [in canRemoveAddasl()]
    205: if (UseMID.mayStore() && UseMI.getOperand(2).isReg() &&  [in canRemoveAddasl()]
    206: UseMI.getOperand(2).getReg() == MI.getOperand(0).getReg())  [in canRemoveAddasl()]
    209: for (auto &Mo : UseMI.operands())  [in canRemoveAddasl()]
    [all …]

HexagonConstExtenders.cpp
    319: MachineInstr *UseMI = nullptr;  [member]
    332: return UseMI->getOperand(OpNum);  [in getOp()]
    335: return UseMI->getOperand(OpNum);  [in getOp()]
   1104: unsigned IdxOpc = getRegOffOpcode(ED.UseMI->getOpcode());  [in getOffsetRange()]
   1114: if (!ED.UseMI->mayLoad() && !ED.UseMI->mayStore())  [in getOffsetRange()]
   1219: ED.UseMI = &MI;  [in recordExtender()]
   1290: if (ED.UseMI->getOpcode() == Hexagon::A2_tfrsi) {  [in assignInits()]
   1494: MachineBasicBlock *DomB = ED0.UseMI->getParent();  [in calculatePlacement()]
   1495: RefMIs.insert(ED0.UseMI);  [in calculatePlacement()]
   1499: MachineBasicBlock *MBB = ED.UseMI->getParent();  [in calculatePlacement()]
    [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/

RISCVMergeBaseOffset.cpp
    381: for (const MachineInstr &UseMI : MRI->use_instructions(DestReg)) {  [in foldIntoMemoryOps(), local]
    382: switch (UseMI.getOpcode()) {  [in foldIntoMemoryOps()]
    384: LLVM_DEBUG(dbgs() << "Not a load or store instruction: " << UseMI);  [in foldIntoMemoryOps()]
    403: if (UseMI.getOperand(1).isFI())  [in foldIntoMemoryOps()]
    406: if (DestReg == UseMI.getOperand(0).getReg())  [in foldIntoMemoryOps()]
    408: assert(DestReg == UseMI.getOperand(1).getReg() &&  [in foldIntoMemoryOps()]
    411: int64_t Offset = UseMI.getOperand(2).getImm();  [in foldIntoMemoryOps()]
    422: I < UseMI.getNumOperands(); I += 1 + NumOps) {  [in foldIntoMemoryOps()]
    423: const MachineOperand &FlagsMO = UseMI.getOperand(I);  [in foldIntoMemoryOps()]
    436: const MachineOperand &MO = UseMI.getOperand(I + 1 + J);  [in foldIntoMemoryOps()]
    [all …]

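foldIntoMemoryOps() first proves that every user of the address register is a load or store it knows how to rewrite. A hedged sketch of that precondition scan, assuming the usual RISC-V memory-op operand layout (def/data, base, immediate offset); the function name and the exact set of checks are illustrative, not the pass's full logic.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Return true if every user of DestReg looks like a memory op whose
    // immediate offset (operand 2) could absorb a folded constant. Assumes
    // the operand layout def/data, base, offset used by RISC-V loads/stores.
    static bool allUsesAreFoldableMemOps(Register DestReg,
                                         const MachineRegisterInfo &MRI) {
      for (const MachineInstr &UseMI : MRI.use_instructions(DestReg)) {
        if (!UseMI.mayLoadOrStore())
          return false;                     // not a load or store instruction
        if (UseMI.getOperand(1).isFI())
          return false;                     // frame-index base: leave it alone
        if (DestReg == UseMI.getOperand(0).getReg())
          return false;                     // address reg used as store data
        if (!UseMI.getOperand(2).isImm())
          return false;                     // offset is not a plain immediate
      }
      return true;
    }
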
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/

LiveRangeEdit.cpp
    209: MachineInstr *DefMI = nullptr, *UseMI = nullptr;  [in foldAsLoad(), local]
    221: if (UseMI && UseMI != MI)  [in foldAsLoad()]
    226: UseMI = MI;  [in foldAsLoad()]
    229: if (!DefMI || !UseMI)  [in foldAsLoad()]
    235: LIS.getInstructionIndex(*UseMI)))  [in foldAsLoad()]
    245: << " into single use: " << *UseMI);  [in foldAsLoad()]
    248: if (UseMI->readsWritesVirtualRegister(LI->reg(), &Ops).second)  [in foldAsLoad()]
    251: MachineInstr *FoldMI = TII.foldMemoryOperand(*UseMI, Ops, *DefMI, &LIS);  [in foldAsLoad()]
    255: LIS.ReplaceMachineInstrInMaps(*UseMI, *FoldMI);  [in foldAsLoad()]
    257: if (UseMI->shouldUpdateCallSiteInfo())  [in foldAsLoad()]
    [all …]

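The foldAsLoad() hits show the guard that runs before TII.foldMemoryOperand(): the register must have exactly one defining instruction and exactly one distinct user. A simplified sketch of that scan (the real function additionally consults LiveIntervals and the fold target; the helper name is illustrative):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Find the unique defining instruction and the unique using instruction
    // of Reg, as foldAsLoad() does before attempting to fold the def into
    // its single use. Returns false on multiple defs or multiple users.
    static bool findSingleDefAndUse(Register Reg, MachineRegisterInfo &MRI,
                                    MachineInstr *&DefMI, MachineInstr *&UseMI) {
      DefMI = nullptr;
      UseMI = nullptr;
      for (MachineOperand &MO : MRI.reg_nodbg_operands(Reg)) {
        MachineInstr *MI = MO.getParent();
        if (MO.isDef()) {
          if (DefMI && DefMI != MI)
            return false;               // multiple defs: not foldable
          DefMI = MI;
        } else {
          if (UseMI && UseMI != MI)
            return false;               // second distinct user: give up
          UseMI = MI;
        }
      }
      return DefMI && UseMI;
    }
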
MachineTraceMetrics.cpp
    669: static bool getDataDeps(const MachineInstr &UseMI,  [in getDataDeps(), argument]
    673: if (UseMI.isDebugInstr())  [in getDataDeps()]
    677: for (const MachineOperand &MO : UseMI.operands()) {  [in getDataDeps()]
    697: static void getPHIDeps(const MachineInstr &UseMI,  [in getPHIDeps(), argument]
    704: assert(UseMI.isPHI() && UseMI.getNumOperands() % 2 && "Bad PHI");  [in getPHIDeps()]
    705: for (unsigned i = 1; i != UseMI.getNumOperands(); i += 2) {  [in getPHIDeps()]
    706: if (UseMI.getOperand(i + 1).getMBB() == Pred) {  [in getPHIDeps()]
    707: Register Reg = UseMI.getOperand(i).getReg();  [in getPHIDeps()]
    716: static void updatePhysDepsDownwards(const MachineInstr *UseMI,  [in updatePhysDepsDownwards(), argument]
    723: for (const MachineOperand &MO : UseMI->operands()) {  [in updatePhysDepsDownwards()]
    [all …]

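getPHIDeps() relies on the fixed layout of machine PHI operands: operand 0 is the def, and the rest come in (value, predecessor-MBB) pairs, which is why the loop starts at 1 and steps by 2. A minimal sketch of reading that layout, with the helper name being illustrative:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"

    using namespace llvm;

    // Return the register a PHI receives along the edge from Pred, or an
    // invalid Register if Pred is not an incoming block. Mirrors the walk
    // in getPHIDeps(): operands 1,3,5,... are incoming values, operands
    // 2,4,6,... the corresponding predecessor blocks.
    static Register getIncomingValueForBlock(const MachineInstr &PHI,
                                             const MachineBasicBlock *Pred) {
      assert(PHI.isPHI() && PHI.getNumOperands() % 2 && "Bad PHI");
      for (unsigned I = 1, E = PHI.getNumOperands(); I != E; I += 2)
        if (PHI.getOperand(I + 1).getMBB() == Pred)
          return PHI.getOperand(I).getReg();
      return Register(); // Pred feeds no value into this PHI
    }
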
TargetSchedule.cpp
    170: computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const  [computeOperandLatency(), argument]
    175: const MachineInstr *UseMI, unsigned UseOperIdx) const {  [in computeOperandLatency()]
    185: if (UseMI) {  [in computeOperandLatency()]
    187: *UseMI, UseOperIdx);  [in computeOperandLatency()]
    209: if (!UseMI)  [in computeOperandLatency()]
    213: const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);  [in computeOperandLatency()]
    216: unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);  [in computeOperandLatency()]

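computeOperandLatency() is the scheduler's def-to-use latency query; per the declaration hit above, the caller passes the defining instruction and operand index plus, optionally, the consuming instruction and operand index. A minimal usage sketch, assuming a TargetSchedModel already initialized for the current subtarget; the wrapper name is illustrative, and the note about a null UseMI is an inference from the "if (!UseMI)" hit above rather than documented behavior.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetSchedule.h"

    using namespace llvm;

    // Cycles from DefMI producing operand DefOperIdx until UseMI can
    // consume it through UseOperIdx. UseMI may be null (see the hit at
    // line 209), in which case a default def latency is returned.
    static unsigned defUseLatency(const TargetSchedModel &SchedModel,
                                  const MachineInstr *DefMI, unsigned DefOperIdx,
                                  const MachineInstr *UseMI, unsigned UseOperIdx) {
      return SchedModel.computeOperandLatency(DefMI, DefOperIdx,
                                              UseMI, UseOperIdx);
    }
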
MachineLICM.cpp
   1066: for (MachineInstr &UseMI : MRI->use_instructions(CopyDstReg)) {  [in isCopyFeedingInvariantStore()]
   1067: if (UseMI.mayStore() && isInvariantStore(UseMI, TRI, MRI))  [in isCopyFeedingInvariantStore()]
   1130: for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {  [in HasLoopPHIUse()]
   1132: if (UseMI.isPHI()) {  [in HasLoopPHIUse()]
   1135: if (CurLoop->contains(&UseMI))  [in HasLoopPHIUse()]
   1140: if (isExitBlock(CurLoop, UseMI.getParent()))  [in HasLoopPHIUse()]
   1145: if (UseMI.isCopy() && CurLoop->contains(&UseMI))  [in HasLoopPHIUse()]
   1146: Work.push_back(&UseMI);  [in HasLoopPHIUse()]
   1161: for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {  [in HasHighOperandLatency()]
   1162: if (UseMI.isCopyLike())  [in HasHighOperandLatency()]
    [all …]

DetectDeadLanes.cpp
    339: const MachineInstr &UseMI = *MO.getParent();  [in determineInitialUsedLanes(), local]
    340: if (UseMI.isKill())  [in determineInitialUsedLanes()]
    344: if (lowersToCopies(UseMI)) {  [in determineInitialUsedLanes()]
    345: assert(UseMI.getDesc().getNumDefs() == 1);  [in determineInitialUsedLanes()]
    346: const MachineOperand &Def = *UseMI.defs().begin();  [in determineInitialUsedLanes()]
    353: if (lowersToCopies(UseMI)) {  [in determineInitialUsedLanes()]
    355: CrossCopy = isCrossCopy(*MRI, UseMI, DstRC, MO);  [in determineInitialUsedLanes()]
    357: LLVM_DEBUG(dbgs() << "Copy across incompatible classes: " << UseMI);  [in determineInitialUsedLanes()]

MachineSSAUpdater.cpp
    230: MachineInstr *UseMI = U.getParent();  [in RewriteUse(), local]
    232: if (UseMI->isPHI()) {  [in RewriteUse()]
    233: MachineBasicBlock *SourceBB = findCorrespondingPred(UseMI, &U);  [in RewriteUse()]
    236: NewVR = GetValueInMiddleOfBlock(UseMI->getParent());  [in RewriteUse()]
    246: MachineBasicBlock *UseBB = UseMI->getParent();  [in RewriteUse()]

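RewriteUse() picks the rewrite point differently for PHI uses (the value must be live-out of the matching predecessor) and ordinary uses (the value is materialized in the middle of the use block). A hedged sketch of how a pass typically drives MachineSSAUpdater end to end; the helper name and parameters are illustrative, not from this file:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/CodeGen/MachineSSAUpdater.h"

    using namespace llvm;

    // Rewrite every use of OldReg to a value merged from the given defs.
    // MachineSSAUpdater inserts whatever PHIs the new placement requires.
    static void rewriteAllUses(
        MachineFunction &MF, Register OldReg,
        ArrayRef<std::pair<MachineBasicBlock *, Register>> AvailableDefs) {
      MachineRegisterInfo &MRI = MF.getRegInfo();
      MachineSSAUpdater SSAUpdate(MF);
      SSAUpdate.Initialize(OldReg);
      for (const auto &[MBB, Reg] : AvailableDefs)
        SSAUpdate.AddAvailableValue(MBB, Reg);

      // Collect first: RewriteUse() mutates the use list being walked.
      SmallVector<MachineOperand *, 8> UsesToRewrite;
      for (MachineOperand &MO : MRI.use_operands(OldReg))
        UsesToRewrite.push_back(&MO);
      for (MachineOperand *MO : UsesToRewrite)
        SSAUpdate.RewriteUse(*MO);
    }
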
OptimizePHIs.cpp
    155: for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DstReg)) {  [in IsDeadPHICycle()]
    156: if (!UseMI.isPHI() || !IsDeadPHICycle(&UseMI, PHIsInCycle))  [in IsDeadPHICycle()]

TailDuplicator.cpp
    228: MachineInstr *UseMI = UseMO.getParent();  [in tailDuplicateAndUpdate(), local]
    233: if (UseMI->isDebugValue()) {  [in tailDuplicateAndUpdate()]
    237: if (UseMI->getParent() == DefBB && !UseMI->isPHI())  [in tailDuplicateAndUpdate()]
    242: MachineInstr *UseMI = UseMO->getParent();  [in tailDuplicateAndUpdate(), local]
    244: SSAUpdate.GetValueInMiddleOfBlock(UseMI->getParent(), true));  [in tailDuplicateAndUpdate()]
    308: for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {  [in isDefLiveOut()]
    309: if (UseMI.isDebugValue())  [in isDefLiveOut()]
    311: if (UseMI.getParent() != BB)  [in isDefLiveOut()]

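The isDefLiveOut() hits are a compact liveness test worth spelling out: a def escapes its block exactly when some non-debug user sits in another block. A self-contained rendering of that test (the free-function form is an illustrative adaptation of the member function above):

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // A def is live-out of BB if any non-debug user is in another block --
    // the same walk as TailDuplicator::isDefLiveOut() above.
    static bool defIsLiveOut(Register Reg, const MachineBasicBlock *BB,
                             const MachineRegisterInfo &MRI) {
      for (const MachineInstr &UseMI : MRI.use_instructions(Reg)) {
        if (UseMI.isDebugValue())
          continue;
        if (UseMI.getParent() != BB)
          return true;
      }
      return false;
    }
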
PeepholeOptimizer.cpp
    547: MachineInstr *UseMI = UseMO.getParent();  [in INITIALIZE_PASS_DEPENDENCY(), local]
    548: if (UseMI == &MI)  [in INITIALIZE_PASS_DEPENDENCY()]
    551: if (UseMI->isPHI()) {  [in INITIALIZE_PASS_DEPENDENCY()]
    577: if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)  [in INITIALIZE_PASS_DEPENDENCY()]
    580: MachineBasicBlock *UseMBB = UseMI->getParent();  [in INITIALIZE_PASS_DEPENDENCY()]
    583: if (!LocalMIs.count(UseMI))  [in INITIALIZE_PASS_DEPENDENCY()]
    619: MachineInstr *UseMI = UseMO->getParent();  [in INITIALIZE_PASS_DEPENDENCY(), local]
    620: MachineBasicBlock *UseMBB = UseMI->getParent();  [in INITIALIZE_PASS_DEPENDENCY()]
    645: RC = MRI->getRegClass(UseMI->getOperand(0).getReg());  [in INITIALIZE_PASS_DEPENDENCY()]
    648: BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),  [in INITIALIZE_PASS_DEPENDENCY()]
    [all …]

RegisterCoalescer.cpp
    884: MachineInstr *UseMI = MO.getParent();  [in removeCopyByCommutingDef(), local]
    885: unsigned OpNo = &MO - &UseMI->getOperand(0);  [in removeCopyByCommutingDef()]
    886: SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI);  [in removeCopyByCommutingDef()]
    891: if (UseMI->isRegTiedToDefOperand(OpNo))  [in removeCopyByCommutingDef()]
    929: MachineInstr *UseMI = UseMO.getParent();  [in removeCopyByCommutingDef(), local]
    930: if (UseMI->isDebugInstr()) {  [in removeCopyByCommutingDef()]
    936: SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI).getRegSlot(true);  [in removeCopyByCommutingDef()]
    947: if (UseMI == CopyMI)  [in removeCopyByCommutingDef()]
    949: if (!UseMI->isCopy())  [in removeCopyByCommutingDef()]
    951: if (UseMI->getOperand(0).getReg() != IntB.reg() ||  [in removeCopyByCommutingDef()]
    [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

X86FastPreTileConfig.cpp
     72: void reload(MachineBasicBlock::iterator UseMI, Register VirtReg,
    218: void X86FastPreTileConfig::reload(MachineBasicBlock::iterator UseMI,  [in reload(), argument]
    232: if (UseMI->isCopy())  [in reload()]
    233: TileReg = UseMI->getOperand(0).getReg();  [in reload()]
    242: MachineInstr *NewMI = BuildMI(*UseMI->getParent(), UseMI, DebugLoc(),  [in reload()]
    246: BuildMI(*UseMI->getParent(), UseMI, DebugLoc(), TII->get(Opc), TileReg)  [in reload()]
    256: if (UseMI->isCopy()) {  [in reload()]
    257: UseMI->eraseFromParent();  [in reload()]
    260: for (auto &MO : UseMI->operands()) {  [in reload()]
    625: for (MachineInstr &UseMI : MRI->use_instructions(TileReg)) {  [in configBasicBlock()]
    [all …]

X86SpeculativeLoadHardening.cpp
   1783: for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {  [in sinkPostLoadHardenedInst()]
   1786: if (HardenedInstrs.count(&UseMI)) {  [in sinkPostLoadHardenedInst()]
   1787: if (!X86InstrInfo::isDataInvariantLoad(UseMI) || isEFLAGSDefLive(UseMI)) {  [in sinkPostLoadHardenedInst()]
   1791: assert(X86InstrInfo::isDataInvariant(UseMI) &&  [in sinkPostLoadHardenedInst()]
   1798: const int MemRefBeginIdx = X86::getFirstAddrOperandIdx(UseMI);  [in sinkPostLoadHardenedInst()]
   1803: UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);  [in sinkPostLoadHardenedInst()]
   1805: UseMI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);  [in sinkPostLoadHardenedInst()]
   1821: if (!X86InstrInfo::isDataInvariant(UseMI) || UseMI.getParent() != MI.getParent() ||  [in sinkPostLoadHardenedInst()]
   1822: isEFLAGSDefLive(UseMI))  [in sinkPostLoadHardenedInst()]
   1827: if (UseMI.getDesc().getNumDefs() > 1)  [in sinkPostLoadHardenedInst()]
    [all …]

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/

Localizer.cpp
    126: MachineInstr &UseMI = *MOUse.getParent();  [localizeInterBlock(), local]
    128: MachineInstr &UseMI = *MOUse.getParent();  [in localizeInterBlock()]
    129: if (MRI->hasOneUse(Reg) && !UseMI.isPHI())  [in localizeInterBlock()]
    130: InsertMBB->insert(UseMI, LocalizedMI);  [in localizeInterBlock()]
    165: for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {  [in localizeIntraBlock()]
    166: if (!UseMI.isPHI())  [in localizeIntraBlock()]
    167: Users.insert(&UseMI);  [in localizeIntraBlock()]

/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/

MLxExpansionPass.cpp
    122: MachineInstr *UseMI = &*MRI->use_instr_nodbg_begin(Reg);  [in getDefReg(), local]
    123: if (UseMI->getParent() != MBB)  [in getDefReg()]
    126: while (UseMI->isCopy() || UseMI->isInsertSubreg()) {  [in getDefReg()]
    127: Reg = UseMI->getOperand(0).getReg();  [in getDefReg()]
    130: UseMI = &*MRI->use_instr_nodbg_begin(Reg);  [in getDefReg()]
    131: if (UseMI->getParent() != MBB)  [in getDefReg()]

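getDefReg() here chases a value through same-block copy chains by repeatedly hopping to the first non-debug user while that user is a COPY or INSERT_SUBREG. A minimal sketch of the walk; the name is illustrative, the single-use guard is added so the iterator dereference is always valid, and the real function has further bailouts not visible in the hits:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Follow Reg through a chain of COPY / INSERT_SUBREG instructions
    // inside MBB and return the register at the end of the chain. Stops
    // when the chain leaves the block, fans out, or has no single user.
    static Register lookThroughCopiesInBlock(Register Reg,
                                             const MachineBasicBlock *MBB,
                                             const MachineRegisterInfo &MRI) {
      while (MRI.hasOneNonDBGUse(Reg)) {
        MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(Reg);
        if (UseMI.getParent() != MBB ||
            (!UseMI.isCopy() && !UseMI.isInsertSubreg()))
          break;
        Reg = UseMI.getOperand(0).getReg(); // continue from the chained def
      }
      return Reg;
    }
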
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64Subtarget.cpp
    505: const MachineInstr *UseMI = Use->getInstr();  [in adjustSchedDependency(), local]
    506: if (UseMI->getOpcode() == TargetOpcode::BUNDLE) {  [in adjustSchedDependency()]
    507: Register Reg = UseMI->getOperand(UseOpIdx).getReg();  [in adjustSchedDependency()]
    508: for (const auto &Op : const_mi_bundle_ops(*UseMI)) {  [in adjustSchedDependency()]
    510: UseMI = Op.getParent();  [in adjustSchedDependency()]
    518: SchedModel->computeOperandLatency(DefMI, DefOpIdx, UseMI, UseOpIdx));  [in adjustSchedDependency()]

/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/

PPCVSXSwapRemoval.cpp
    679: for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {  [in recordUnoptimizableWebs()]
    680: int UseIdx = SwapMap[&UseMI];  [in recordUnoptimizableWebs()]
    692: LLVM_DEBUG(UseMI.dump());  [in recordUnoptimizableWebs()]
    701: Register SwapDefReg = UseMI.getOperand(0).getReg();  [in recordUnoptimizableWebs()]
    713: LLVM_DEBUG(UseMI.dump());  [in recordUnoptimizableWebs()]
    745: for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {  [in recordUnoptimizableWebs()]
    746: int UseIdx = SwapMap[&UseMI];  [in recordUnoptimizableWebs()]
    787: for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {  [in markSwapsForRemoval()]
    788: int UseIdx = SwapMap[&UseMI];  [in markSwapsForRemoval()]
    792: LLVM_DEBUG(UseMI.dump());  [in markSwapsForRemoval()]

/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/

Mips16RegisterInfo.cpp
     58: MachineBasicBlock::iterator &UseMI, const TargetRegisterClass *RC,  [in saveScavengerRegister(), argument]
     63: TII.copyPhysReg(MBB, UseMI, DL, Reg, Mips::T0, true);  [in saveScavengerRegister()]

/freebsd/contrib/llvm-project/llvm/lib/Target/SPIRV/

SPIRVPreLegalizer.cpp
    159: MachineInstr *UseMI = &*I;  [in findAssignTypeInstr(), local]
    160: if ((isSpvIntrinsic(*UseMI, Intrinsic::spv_assign_ptr_type) ||  [in findAssignTypeInstr()]
    161: isSpvIntrinsic(*UseMI, Intrinsic::spv_assign_type)) &&  [in findAssignTypeInstr()]
    162: UseMI->getOperand(1).getReg() == Reg)  [in findAssignTypeInstr()]
    163: return UseMI;  [in findAssignTypeInstr()]
    489: MachineInstr &UseMI = *MRI.use_instr_begin(Reg);  [in generateAssignInstrs(), local]
    490: if (isSpvIntrinsic(UseMI, Intrinsic::spv_assign_type) ||  [in generateAssignInstrs()]
    491: isSpvIntrinsic(UseMI, Intrinsic::spv_assign_name))  [in generateAssignInstrs()]
    603: MachineInstr &UseMI = *MRI.use_instr_begin(DstReg);  [in processInstrsWithTypeFolding(), local]
    604: if (UseMI.getOpcode() == TargetOpcode::G_ADDRSPACE_CAST)  [in processInstrsWithTypeFolding()]

/freebsd/contrib/llvm-project/llvm/lib/Target/VE/

VEInstrInfo.cpp
    578: bool VEInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,  [in foldImmediate(), argument]
    639: LLVM_DEBUG(UseMI.dump());  [in foldImmediate()]
    661: switch (UseMI.getOpcode()) {  [in foldImmediate()]
    708: if (UseMI.getOperand(1).getReg() == Reg) {  [in foldImmediate()]
    711: assert(UseMI.getOperand(2).getReg() == Reg);  [in foldImmediate()]
    725: if (UseMI.getOperand(1).getReg() == Reg) {  [in foldImmediate()]
    732: assert(UseMI.getOperand(2).getReg() == Reg);  [in foldImmediate()]
    745: UseMI.setDesc(get(NewUseOpc));  [in foldImmediate()]
    747: UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());  [in foldImmediate()]
    749: UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);  [in foldImmediate()]