/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/

MLxExpansionPass.cpp
    94  MachineInstr *DefMI = MRI->getVRegDef(Reg);   in getAccDefMI() [local]
    96  if (DefMI->getParent() != MBB)   in getAccDefMI()
    98  if (DefMI->isCopyLike()) {   in getAccDefMI()
    99  Reg = DefMI->getOperand(1).getReg();   in getAccDefMI()
   101  DefMI = MRI->getVRegDef(Reg);   in getAccDefMI()
   104  } else if (DefMI->isInsertSubreg()) {   in getAccDefMI()
   105  Reg = DefMI->getOperand(2).getReg();   in getAccDefMI()
   107  DefMI = MRI->getVRegDef(Reg);   in getAccDefMI()
   113  return DefMI;   in getAccDefMI()
   146  MachineInstr *DefMI = MRI->getVRegDef(Reg);   in hasLoopHazard() [local]
   [all …]

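The getAccDefMI() hits above illustrate a recurring MachineSSA idiom: starting from a virtual register, walk backwards through copy-like definitions until the instruction that really produces the value is found. A minimal sketch of that idiom follows; the helper name and the single-block restriction are illustrative, not the pass's exact code.

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Follow COPY-like definitions of Reg until a non-copy def is reached,
    // staying inside the given block. Returns nullptr if the chain leaves the
    // block or runs into a physical register.
    static MachineInstr *lookThroughCopies(Register Reg, MachineBasicBlock *MBB,
                                           MachineRegisterInfo *MRI) {
      if (!Reg.isVirtual())
        return nullptr;
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      while (DefMI && DefMI->isCopyLike()) {
        if (DefMI->getParent() != MBB)
          return nullptr;
        Register Src = DefMI->getOperand(1).getReg();
        if (!Src.isVirtual())
          return nullptr;
        DefMI = MRI->getVRegDef(Src);
      }
      return DefMI;
    }
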
ARMHazardRecognizer.cpp
    28  static bool hasRAWHazard(MachineInstr *DefMI, MachineInstr *MI,   in hasRAWHazard() [argument]
    39  return MI->readsRegister(DefMI->getOperand(0).getReg(), &TRI);   in hasRAWHazard()
    54  MachineInstr *DefMI = LastMI;   in getHazardType() [local]
    67  DefMI = &*I;   in getHazardType()
    71  if (TII.isFpMLxInstruction(DefMI->getOpcode()) &&   in getHazardType()
    73  hasRAWHazard(DefMI, MI, TII.getRegisterInfo()))) {   in getHazardType()

ARMFixCortexA57AES1742098Pass.cpp
   367  MachineInstr *DefMI = *It;   in analyzeMF() [local]
   371  << printReg(MOp.getReg(), TRI) << ": " << *DefMI);   in analyzeMF()
   378  MachineBasicBlock::iterator DefIt = DefMI;   in analyzeMF()
   380  if (DefIt != DefMI->getParent()->end()) {   in analyzeMF()
   381  LLVM_DEBUG(dbgs() << "Moving Fixup to immediately after " << *DefMI   in analyzeMF()

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/

TargetSchedule.cpp
   174  const MachineInstr *DefMI, unsigned DefOperIdx,   in computeOperandLatency()
   177  const unsigned InstrLatency = computeInstrLatency(DefMI);   in computeOperandLatency()
   178  const unsigned DefaultDefLatency = TII->defaultDefLatency(SchedModel, *DefMI);   in computeOperandLatency()
   186  OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,   in computeOperandLatency()
   190  unsigned DefClass = DefMI->getDesc().getSchedClass();   in computeOperandLatency()
   201  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);   in computeOperandLatency()
   202  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);   in computeOperandLatency()
   225  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() &&   in computeOperandLatency()
   226  !DefMI->getDesc().operands()[DefOperIdx].isOptionalDef() &&   in computeOperandLatency()
   229  << *DefMI << " (Tr…   in computeOperandLatency()
   169  computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const   computeOperandLatency() [argument]
   274  computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *DepMI) const   computeOutputLatency() [argument]
   [all …]

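The computeOperandLatency() hits show the scheduler resolving a def-to-use latency from DefMI's scheduling class, with defaultDefLatency() as a fallback. A hedged usage sketch of the public TargetSchedModel API those hits implement (the wrapper function and its name are illustrative):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetSchedule.h"

    using namespace llvm;

    // Cycles between issuing DefMI and UseMI being able to read the value
    // produced in DefMI's operand DefOperIdx. With no known user, fall back
    // to the def's whole-instruction latency.
    static unsigned queryDefUseLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr *DefMI,
                                       unsigned DefOperIdx,
                                       const MachineInstr *UseMI,
                                       unsigned UseOperIdx) {
      if (!UseMI)
        return SchedModel.computeInstrLatency(DefMI);
      return SchedModel.computeOperandLatency(DefMI, DefOperIdx, UseMI,
                                              UseOperIdx);
    }
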
LiveRangeEdit.cpp
    72  const MachineInstr *DefMI) {   in checkRematerializable() [argument]
    73  assert(DefMI && "Missing instruction");   in checkRematerializable()
    75  if (!TII.isTriviallyReMaterializable(*DefMI))   in checkRematerializable()
    90  MachineInstr *DefMI = LIS.getInstructionFromIndex(OrigVNI->def);   in scanRemattable() [local]
    91  if (!DefMI)   in scanRemattable()
    93  checkRematerializable(OrigVNI, DefMI);   in scanRemattable()
   209  MachineInstr *DefMI = nullptr, *UseMI = nullptr;   in foldAsLoad() [local]
   215  if (DefMI && DefMI != MI)   in foldAsLoad()
   219  DefMI = MI;   in foldAsLoad()
   229  if (!DefMI || !UseMI)   in foldAsLoad()
   [all …]

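The checkRematerializable()/scanRemattable() hits map a live-range value number back to its defining instruction and then ask TargetInstrInfo whether that def can simply be re-executed at the point of use instead of being spilled. A condensed sketch of that test, assuming LiveIntervals and TargetInstrInfo references are available (LiveRangeEdit itself performs additional per-operand checks):

    #include "llvm/CodeGen/LiveIntervals.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // A value is a remat candidate when its VNInfo maps to a real defining
    // instruction and TargetInstrInfo reports that instruction as trivially
    // rematerializable (no side effects, operands available anywhere).
    static bool isRematCandidate(const VNInfo *VNI, const LiveIntervals &LIS,
                                 const TargetInstrInfo &TII) {
      if (!VNI || VNI->isUnused())
        return false;
      MachineInstr *DefMI = LIS.getInstructionFromIndex(VNI->def);
      if (!DefMI) // e.g. a PHI value with no single defining instruction
        return false;
      return TII.isTriviallyReMaterializable(*DefMI);
    }
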
MachineLateInstrsCleanup.cpp
   131  if (MachineInstr *DefMI = RegDefs[MBB->getNumber()].lookup(Reg))   in clearKillsForDef() [local]
   132  if (DefMI->getParent() == MBB)   in clearKillsForDef()
   190  for (auto [Reg, DefMI] : RegDefs[FirstPred->getNumber()])   in processBlock()
   193  [&, &Reg = Reg, &DefMI = DefMI](const MachineBasicBlock *Pred) {   in processBlock()
   194  return RegDefs[Pred->getNumber()].hasIdentical(Reg, DefMI);   in processBlock()
   196  MBBDefs[Reg] = DefMI;   in processBlock()
   198  << printMBBReference(*MBB) << ": " << *DefMI;);   in processBlock()

MachineTraceMetrics.cpp
   646  const MachineInstr *DefMI;   [member]
   650  DataDep(const MachineInstr *DefMI, unsigned DefOp, unsigned UseOp)   in DataDep()
   651  : DefMI(DefMI), DefOp(DefOp), UseOp(UseOp) {}   in DataDep()
   659  DefMI = DefI->getParent();   in DataDep()
   783  const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);   in computeCrossBlockCriticalPath() [local]
   785  const TraceBlockInfo &DefTBI = BlockInfo[DefMI->getParent()->getNumber()];   in computeCrossBlockCriticalPath()
   788  unsigned Len = LIR.Height + Cycles[DefMI].Depth;   in computeCrossBlockCriticalPath()
   808  BlockInfo[Dep.DefMI->getParent()->getNumber()];   in updateDepth()
   813  unsigned DepCycle = Cycles.lookup(Dep.DefMI).Depth;   in updateDepth()
   815  if (!Dep.DefMI->isTransient())   in updateDepth()
   [all …]

RegisterCoalescer.cpp
   845  MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def);   in removeCopyByCommutingDef() [local]
   846  if (!DefMI)   in removeCopyByCommutingDef()
   848  if (!DefMI->isCommutable())   in removeCopyByCommutingDef()
   852  int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg(), /*TRI=*/nullptr);   in removeCopyByCommutingDef()
   855  if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))   in removeCopyByCommutingDef()
   868  if (!TII->findCommutedOpIndices(*DefMI, UseOpIdx, NewDstIdx))   in removeCopyByCommutingDef()
   871  MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);   in removeCopyByCommutingDef()
   896  << *DefMI);   in removeCopyByCommutingDef()
   900  MachineBasicBlock *MBB = DefMI->getParent();   in removeCopyByCommutingDef()
   902  TII->commuteInstruction(*DefMI, false, UseOpIdx, NewDstIdx);   in removeCopyByCommutingDef()
   [all …]

PHIElimination.cpp
   219  MachineInstr *DefMI = MRI->getVRegDef(VirtReg);   in run() [local]
   220  if (!DefMI)   in run()
   231  MachineBasicBlock *DefMBB = DefMI->getParent();   in run()
   255  for (MachineInstr *DefMI : ImpDefs) {   in run()
   256  Register DefReg = DefMI->getOperand(0).getReg();   in run()
   259  LIS->RemoveMachineInstrFromMaps(*DefMI);   in run()
   260  DefMI->eraseFromParent();   in run()
   593  if (MachineInstr *DefMI = MRI->getVRegDef(SrcReg))   in LowerPHINode() [local]
   594  if (DefMI->isImplicitDef())   in LowerPHINode()
   595  ImpDefs.insert(DefMI);   in LowerPHINode()

MachineCSE.cpp
   186  MachineInstr *DefMI = MRI->getVRegDef(Reg);   in INITIALIZE_PASS_DEPENDENCY() [local]
   187  if (!DefMI || !DefMI->isCopy())   in INITIALIZE_PASS_DEPENDENCY()
   189  Register SrcReg = DefMI->getOperand(1).getReg();   in INITIALIZE_PASS_DEPENDENCY()
   192  if (DefMI->getOperand(0).getSubReg())   in INITIALIZE_PASS_DEPENDENCY()
   206  if (DefMI->getOperand(1).getSubReg())   in INITIALIZE_PASS_DEPENDENCY()
   210  LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);   in INITIALIZE_PASS_DEPENDENCY()
   221  DefMI->changeDebugValuesDefReg(SrcReg);   in INITIALIZE_PASS_DEPENDENCY()
   223  DefMI->eraseFromParent();   in INITIALIZE_PASS_DEPENDENCY()

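The MachineCSE hits perform a small copy-propagation step before CSE proper: when a register's only definition is a plain full COPY from another virtual register, uses can be renamed to the copy source and the copy erased. A simplified sketch of the eligibility check (the pass itself also verifies register classes and updates debug values, as the line 221 hit shows):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Return the source register if Reg is defined by a full (no subregister)
    // COPY from another virtual register, i.e. a copy whose uses may be
    // renamed to the source. Returns an invalid Register otherwise.
    static Register getCoalescableCopySrc(Register Reg,
                                          const MachineRegisterInfo &MRI) {
      if (!Reg.isVirtual())
        return Register();
      MachineInstr *DefMI = MRI.getVRegDef(Reg);
      if (!DefMI || !DefMI->isCopy())
        return Register();
      Register SrcReg = DefMI->getOperand(1).getReg();
      if (!SrcReg.isVirtual())
        return Register();
      // Subregister copies change lane semantics; only full copies qualify.
      if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
        return Register();
      return SrcReg;
    }
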
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64CondBrTuning.cpp
    67  bool tryToTuneBranch(MachineInstr &MI, MachineInstr &DefMI);
   141  MachineInstr &DefMI) {   in tryToTuneBranch() [argument]
   143  if (MI.getParent() != DefMI.getParent())   in tryToTuneBranch()
   149  switch (DefMI.getOpcode()) {   in tryToTuneBranch()
   195  if (isNZCVTouchedInInstructionRange(DefMI, MI, TRI))   in tryToTuneBranch()
   198  LLVM_DEBUG(DefMI.print(dbgs()));   in tryToTuneBranch()
   202  NewCmp = convertToFlagSetting(DefMI, IsFlagSetting, /*Is64Bit=*/false);   in tryToTuneBranch()
   250  if (isNZCVTouchedInInstructionRange(DefMI, MI, TRI))   in tryToTuneBranch()
   253  LLVM_DEBUG(DefMI.print(dbgs()));   in tryToTuneBranch()
   257  NewCmp = convertToFlagSetting(DefMI, IsFlagSetting, /*Is64Bit=*/true);   in tryToTuneBranch()
   [all …]

AArch64Subtarget.cpp
   493  const MachineInstr *DefMI = Def->getInstr();   in adjustSchedDependency() [local]
   494  if (DefMI->getOpcode() == TargetOpcode::BUNDLE) {   in adjustSchedDependency()
   495  Register Reg = DefMI->getOperand(DefOpIdx).getReg();   in adjustSchedDependency()
   496  for (const auto &Op : const_mi_bundle_ops(*DefMI)) {   in adjustSchedDependency()
   498  DefMI = Op.getParent();   in adjustSchedDependency()
   518  SchedModel->computeOperandLatency(DefMI, DefOpIdx, UseMI, UseOpIdx));   in adjustSchedDependency()

/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

X86OptimizeLEAs.cpp
   350  for (auto *DefMI : List) {   in chooseBestLEA() [local]
   352  int64_t AddrDispShiftTemp = getAddrDispShift(MI, MemOpNo, *DefMI, 1);   in chooseBestLEA()
   364  MRI->getRegClass(DefMI->getOperand(0).getReg()))   in chooseBestLEA()
   371  int DistTemp = calcInstrDist(*DefMI, MI);   in chooseBestLEA()
   381  BestLEA = DefMI;   in chooseBestLEA()
   525  MachineInstr *DefMI;   in removeRedundantAddrCalc() [local]
   528  if (!chooseBestLEA(Insns->second, MI, DefMI, AddrDispShift, Dist))   in removeRedundantAddrCalc()
   538  DefMI->removeFromParent();   in removeRedundantAddrCalc()
   539  MBB->insert(MachineBasicBlock::iterator(&MI), DefMI);   in removeRedundantAddrCalc()
   540  InstrPos[DefMI] = InstrPos[&MI] - 1;   in removeRedundantAddrCalc()
   [all …]

X86TileConfig.cpp
   165  for (auto &DefMI : MRI.def_instructions(R)) {   in INITIALIZE_PASS_DEPENDENCY() [local]
   166  MachineBasicBlock &MBB = *DefMI.getParent();   in INITIALIZE_PASS_DEPENDENCY()
   167  if (DefMI.isMoveImmediate()) {   in INITIALIZE_PASS_DEPENDENCY()
   170  assert(Imm == DefMI.getOperand(1).getImm() &&   in INITIALIZE_PASS_DEPENDENCY()
   174  Imm = DefMI.getOperand(1).getImm();   in INITIALIZE_PASS_DEPENDENCY()
   187  auto Iter = DefMI.getIterator();   in INITIALIZE_PASS_DEPENDENCY()

X86PreTileConfig.cpp
   223  MachineInstr *DefMI = MRI->getVRegDef(R);   in INITIALIZE_PASS_DEPENDENCY() [local]
   224  assert(DefMI && "R must has one define instruction");   in INITIALIZE_PASS_DEPENDENCY()
   225  MachineBasicBlock *DefMBB = DefMI->getParent();   in INITIALIZE_PASS_DEPENDENCY()
   226  if (DefMI->isMoveImmediate() || !DefVisited.insert(DefMI).second)   in INITIALIZE_PASS_DEPENDENCY()
   228  if (DefMI->isPHI()) {   in INITIALIZE_PASS_DEPENDENCY()
   229  for (unsigned I = 1; I < DefMI->getNumOperands(); I += 2)   in INITIALIZE_PASS_DEPENDENCY()
   230  if (isLoopBackEdge(DefMBB, DefMI->getOperand(I + 1).getMBB()))   in INITIALIZE_PASS_DEPENDENCY()
   231  RecordShape(DefMI, DefMBB); // In this case, PHI is also a shape def.   in INITIALIZE_PASS_DEPENDENCY()
   233  WorkList.push_back(DefMI->getOperand(I).getReg());   in INITIALIZE_PASS_DEPENDENCY()
   235  RecordShape(DefMI, DefMBB);   in INITIALIZE_PASS_DEPENDENCY()

X86CallFrameOptimization.cpp
   611  MachineInstr &DefMI = *MRI->getVRegDef(Reg);   in canFoldIntoRegPush() [local]
   615  if ((DefMI.getOpcode() != X86::MOV32rm &&   in canFoldIntoRegPush()
   616  DefMI.getOpcode() != X86::MOV64rm) ||   in canFoldIntoRegPush()
   617  DefMI.getParent() != FrameSetup->getParent())   in canFoldIntoRegPush()
   622  for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)   in canFoldIntoRegPush()
   626  return &DefMI;   in canFoldIntoRegPush()

/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/

MipsOptimizePICCall.cpp
   280  MachineInstr *DefMI = MRI.getVRegDef(Reg);   in isCallViaRegister() [local]
   282  assert(DefMI);   in isCallViaRegister()
   286  if (!DefMI->mayLoad() || DefMI->getNumOperands() < 3)   in isCallViaRegister()
   289  unsigned Flags = DefMI->getOperand(2).getTargetFlags();   in isCallViaRegister()
   295  assert(DefMI->hasOneMemOperand());   in isCallViaRegister()
   296  Val = (*DefMI->memoperands_begin())->getValue();   in isCallViaRegister()
   298  Val = (*DefMI->memoperands_begin())->getPseudoValue();   in isCallViaRegister()

/freebsd/contrib/llvm-project/llvm/lib/Target/BPF/

BPFMIPeephole.cpp
   463  MachineInstr *DefMI;   in eliminateTruncSeq() [local]

/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/

PPCInstrInfo.h
   192  bool simplifyToLI(MachineInstr &MI, MachineInstr &DefMI,
   196  bool transformToNewImmFormFedByAdd(MachineInstr &MI, MachineInstr &DefMI,
   202  MachineInstr &DefMI) const;
   206  unsigned ConstantOpNo, MachineInstr &DefMI,
   221  bool isDefMIElgibleForForwarding(MachineInstr &DefMI,
   226  const MachineInstr &DefMI,
   231  const MachineInstr &DefMI,
   338  const MachineInstr &DefMI,
   351  const MachineInstr &DefMI,   in hasLowDefLatency() [argument]
   497  bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
   [all …]

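The PPCInstrInfo.h hits are declarations of the immediate-forwarding helpers (simplifyToLI, transformToNewImmFormFedByAdd, foldImmediate and friends) that take the instruction being rewritten together with the DefMI that materializes the constant. As a rough illustration of how such a hook is typically driven, here is a hedged sketch of a caller; it assumes the usual TargetInstrInfo::foldImmediate signature and a single-use precondition, and is not PPC-specific code.

    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // Try to fold the immediate materialized by Reg's def directly into UseMI.
    // Only attempted when the def is a move-immediate and Reg has a single
    // non-debug use, so the def can be deleted afterwards if folding succeeds.
    static bool tryFoldImmediateUse(MachineInstr &UseMI, Register Reg,
                                    MachineRegisterInfo &MRI,
                                    const TargetInstrInfo &TII) {
      if (!Reg.isVirtual() || !MRI.hasOneNonDBGUse(Reg))
        return false;
      MachineInstr *DefMI = MRI.getVRegDef(Reg);
      if (!DefMI || !DefMI->isMoveImmediate())
        return false;
      return TII.foldImmediate(UseMI, *DefMI, Reg, &MRI);
    }
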
PPCMIPeephole.cpp
   651  MachineInstr *DefMI = MRI->getVRegDef(TrueReg1);   in simplifyCode() [local]
   653  if (!DefMI)   in simplifyCode()
   656  unsigned DefOpc = DefMI->getOpcode();   in simplifyCode()
   666  TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);   in simplifyCode()
   690  Register DefReg1 = DefMI->getOperand(1).getReg();   in simplifyCode()
   691  Register DefReg2 = DefMI->getOperand(2).getReg();   in simplifyCode()
   692  unsigned DefImmed = DefMI->getOperand(3).getImm();   in simplifyCode()
   741  .add(DefMI->getOperand(1));   in simplifyCode()
   742  addRegToUpdate(DefMI->getOperand(0).getReg());   in simplifyCode()
   743  addRegToUpdate(DefMI->getOperand(1).getReg());   in simplifyCode()
   [all …]

PPCVSXSwapRemoval.cpp
   620  MachineInstr* DefMI = MRI->getVRegDef(Reg);   in formWebs() [local]
   621  assert(SwapMap.contains(DefMI) &&   in formWebs()
   623  int DefIdx = SwapMap[DefMI];   in formWebs()
   631  LLVM_DEBUG(DefMI->dump());   in formWebs()
   725  MachineInstr *DefMI = MRI->getVRegDef(UseReg);   in recordUnoptimizableWebs() [local]
   726  Register DefReg = DefMI->getOperand(0).getReg();   in recordUnoptimizableWebs()
   727  int DefIdx = SwapMap[DefMI];   in recordUnoptimizableWebs()
   737  LLVM_DEBUG(DefMI->dump());   in recordUnoptimizableWebs()
   756  LLVM_DEBUG(DefMI->dump());   in recordUnoptimizableWebs()
   802  MachineInstr *DefMI = MRI->getVRegDef(UseReg);   in markSwapsForRemoval() [local]
   [all …]

PPCInstrInfo.cpp
   169  const InstrItineraryData *ItinData, const MachineInstr &DefMI,   in getOperandLatency() [argument]
   172  ItinData, DefMI, DefIdx, UseMI, UseIdx);   in getOperandLatency()
   174  if (!DefMI.getParent())   in getOperandLatency()
   177  const MachineOperand &DefMO = DefMI.getOperand(DefIdx);   in getOperandLatency()
   183  &DefMI.getParent()->getParent()->getRegInfo();   in getOperandLatency()
   193  Latency = getInstrLatency(ItinData, DefMI);   in getOperandLatency()
   732  MachineInstr *DefMI = MRI->getVRegDef(Reg);   in getConstantFromConstantPool() [local]
   733  for (auto MO2 : DefMI->uses())   in getConstantFromConstantPool()
  2047  bool PPCInstrInfo::onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,   in onlyFoldImmediate() [argument]
  2050  unsigned DefOpc = DefMI.getOpcode();   in onlyFoldImmediate()
   [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/Lanai/

LanaiInstrInfo.cpp
   497  MachineInstr *DefMI = canFoldIntoSelect(MI.getOperand(1).getReg(), MRI);   in optimizeSelect() [local]
   498  bool Invert = !DefMI;   in optimizeSelect()
   499  if (!DefMI)   in optimizeSelect()
   500  DefMI = canFoldIntoSelect(MI.getOperand(2).getReg(), MRI);   in optimizeSelect()
   501  if (!DefMI)   in optimizeSelect()
   513  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);   in optimizeSelect()
   516  const MCInstrDesc &DefDesc = DefMI->getDesc();   in optimizeSelect()
   519  NewMI.add(DefMI->getOperand(i));   in optimizeSelect()
   537  SeenMIs.erase(DefMI);   in optimizeSelect()
   543  if (DefMI->getParent() != MI.getParent())   in optimizeSelect()
   [all …]

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/

Utils.cpp
   462  auto *DefMI = MRI.getVRegDef(Reg);   in getDefSrcRegIgnoringCopies() [local]
   463  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());   in getDefSrcRegIgnoringCopies()
   466  unsigned Opc = DefMI->getOpcode();   in getDefSrcRegIgnoringCopies()
   468  Register SrcReg = DefMI->getOperand(1).getReg();   in getDefSrcRegIgnoringCopies()
   472  DefMI = MRI.getVRegDef(SrcReg);   in getDefSrcRegIgnoringCopies()
   474  Opc = DefMI->getOpcode();   in getDefSrcRegIgnoringCopies()
   476  return DefinitionAndSourceRegister{DefMI, DefSrcReg};   in getDefSrcRegIgnoringCopies()
   641  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);   in getOpcodeDef() [local]
   642  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;   in getOpcodeDef()
   805  const MachineInstr *DefMI = MRI.getVRegDef(Val);   in isKnownNeverNaN() [local]
   [all …]

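The GlobalISel Utils.cpp hits belong to the copy-chasing helpers getDefSrcRegIgnoringCopies(), getDefIgnoringCopies() and getOpcodeDef() declared in llvm/CodeGen/GlobalISel/Utils.h. A hedged usage sketch: checking whether a register is, through any chain of copies, defined by a G_CONSTANT. The helper getOpcodeDef() wraps exactly this check, as the line 641/642 hits show.

    #include "llvm/CodeGen/GlobalISel/Utils.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/CodeGen/TargetOpcodes.h"

    using namespace llvm;

    // Look through COPYs from Reg and return the underlying definition if it
    // is a G_CONSTANT, otherwise nullptr.
    static MachineInstr *getConstantDef(Register Reg,
                                        const MachineRegisterInfo &MRI) {
      MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
      if (DefMI && DefMI->getOpcode() == TargetOpcode::G_CONSTANT)
        return DefMI;
      return nullptr;
    }
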
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

SIFixSGPRCopies.cpp
   724  MachineInstr *DefMI = MRI->getVRegDef(MO->getReg());   in runOnMachineFunction() [local]
   725  if (DefMI && TII->isFoldableCopy(*DefMI)) {   in runOnMachineFunction()
   726  const MachineOperand &Def = DefMI->getOperand(0);   in runOnMachineFunction()
   730  const MachineOperand &Copied = DefMI->getOperand(1);   in runOnMachineFunction()
   823  MachineInstr *DefMI = MRI->getVRegDef(MI.getOperand(I).getReg());   in processPHINode() [local]
   824  if (DefMI && DefMI->isPHI())   in processPHINode()
   825  PHIOperands.insert(DefMI);   in processPHINode()
   846  MachineInstr *DefMI = MRI->getVRegDef(MaybeVGPRConstMO.getReg());   in tryMoveVGPRConstToSGPR() [local]
   847  if (!DefMI || !DefMI->isMoveImmediate())   in tryMoveVGPRConstToSGPR()
   850  MachineOperand *SrcConst = TII->getNamedOperand(*DefMI, AMDGPU::OpName::src0);   in tryMoveVGPRConstToSGPR()
   [all …]

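Several of the hits above (X86PreTileConfig and SIFixSGPRCopies::tryMoveVGPRConstToSGPR in particular) guard a rewrite on the defining instruction being a move-immediate. A generic, target-independent sketch of that guard follows; the real passes then re-emit the constant with target-specific opcodes.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    #include <cstdint>
    #include <optional>

    using namespace llvm;

    // If Reg's defining instruction is a move-immediate with a plain immediate
    // source operand, return that immediate; otherwise return std::nullopt.
    static std::optional<int64_t> getDefImmediate(Register Reg,
                                                  const MachineRegisterInfo &MRI) {
      if (!Reg.isVirtual())
        return std::nullopt;
      const MachineInstr *DefMI = MRI.getVRegDef(Reg);
      if (!DefMI || !DefMI->isMoveImmediate())
        return std::nullopt;
      const MachineOperand &Src = DefMI->getOperand(1);
      if (!Src.isImm()) // some move-immediates take globals or FP immediates
        return std::nullopt;
      return Src.getImm();
    }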