/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64MacroFusion.cpp
      22  static bool isArithmeticBccPair(const MachineInstr *FirstMI,
      28  if (FirstMI == nullptr)
      33  if (CmpOnly && FirstMI->getOperand(0).isReg() &&
      34  !(FirstMI->getOperand(0).getReg() == AArch64::XZR ||
      35  FirstMI->getOperand(0).getReg() == AArch64::WZR)) {
      39  switch (FirstMI->getOpcode()) {
      64  return !AArch64InstrInfo::hasShiftedReg(*FirstMI);
      71  static bool isArithmeticCbzPair(const MachineInstr *FirstMI,
      80  if (FirstMI == nullptr)
      83  switch (FirstMI->getOpcode()) {
      [all …]

AArch64LoadStoreOptimizer.cpp
     565  static bool isPreLdStPairCandidate(MachineInstr &FirstMI, MachineInstr &MI) {
     567  unsigned OpcA = FirstMI.getOpcode();
    1361  static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
    1369  assert(!FirstMI.hasOrderedMemoryRef() &&
    1370  !TII->isLdStPairSuppressed(FirstMI) &&
    1377  unsigned OpcA = FirstMI.getOpcode();
    1382  return !AArch64InstrInfo::isPreLdSt(FirstMI);
    1385  if (AArch64InstrInfo::isPreLdSt(FirstMI) && AArch64InstrInfo::isPreLdSt(MI))
    1412  if (isPreLdStPairCandidate(FirstMI, MI))
    1455  canRenameUpToDef(MachineInstr &FirstMI, LiveRegUnits &UsedInBetween,
      [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/

PPCMacroFusion.cpp
      68  static bool matchingRegOps(const MachineInstr &FirstMI,
      72  const MachineOperand &Op1 = FirstMI.getOperand(FirstMIOpIndex);
      93  // Return true if the FirstMI meets the constraints of SecondMI according to
      96  const MachineInstr &FirstMI,
     127  const MachineOperand &SI = FirstMI.getOperand(2);
     152  return (matchingImmOps(FirstMI, 2, 3) && matchingImmOps(FirstMI, 3, 60)) ||
     153  (matchingImmOps(FirstMI, 2, 6) && matchingImmOps(FirstMI, 3, 57));
     157  return matchingImmOps(FirstMI,
     236  shouldScheduleAdjacent(const TargetInstrInfo &TII, const TargetSubtargetInfo &TSI, const MachineInstr *FirstMI, const MachineInstr &SecondMI)
      [all …]
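
The PPCMacroFusion.cpp hits revolve around small helpers (matchingRegOps, matchingImmOps) that compare a single operand of FirstMI against a register of SecondMI or against a literal value; checkOpConstraints then strings such checks together per fusion kind. A hedged sketch of what a matchingImmOps-style helper plausibly looks like (a guess at the idea, not a copy of the PPC implementation):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineOperand.h"

    // Sketch only: does operand OpIdx of MI hold the immediate value Expect?
    // Mirrors how matchingImmOps(FirstMI, 2, 3) is used above, but the real
    // helper's body may differ.
    static bool matchingImmOpsSketch(const llvm::MachineInstr &MI,
                                     unsigned OpIdx, int64_t Expect) {
      if (OpIdx >= MI.getNumOperands())
        return false;
      const llvm::MachineOperand &MO = MI.getOperand(OpIdx);
      return MO.isImm() && MO.getImm() == Expect;
    }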

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

GCNCreateVOPD.cpp
      48  : FirstMI(First), SecondMI(Second) {}
      50  MachineInstr *FirstMI;
      70  auto *FirstMI = CI.FirstMI;
      72  unsigned Opc1 = FirstMI->getOpcode();
      82  auto VOPDInst = BuildMI(*FirstMI->getParent(), FirstMI,
      83  FirstMI->getDebugLoc(), SII->get(NewOpcode))
      84  .setMIFlags(FirstMI->getFlags() | SecondMI->getFlags());
      87  MachineInstr *MI[] = {FirstMI, SecondMI};
      89  AMDGPU::getVOPDInstInfo(FirstMI->getDesc(), SecondMI->getDesc());
     109  << *CI.FirstMI << "\tY: " << *CI.SecondMI << "\n");
      [all …]

GCNVOPDUtils.cpp
      38  const MachineInstr &FirstMI,
      42  const MachineFunction *MF = FirstMI.getMF();
      57  for (auto MII = MachineBasicBlock::const_iterator(&FirstMI);
      58  MII != FirstMI.getParent()->instr_end(); ++MII) {
      63  }() && "Expected FirstMI to precede SecondMI");
      66  if (Use.isReg() && FirstMI.modifiesRegister(Use.getReg(), TRI))
      70  const MachineInstr &MI = (OpcodeIdx == VOPD::X) ? FirstMI : SecondMI;
      78  AMDGPU::getVOPDInstInfo(FirstMI.getDesc(), SecondMI.getDesc());
      81  const MachineInstr &MI = (CompIdx == VOPD::X) ? FirstMI : SecondMI;
     109  FirstMI
     119  shouldScheduleVOPDAdjacent(const TargetInstrInfo &TII, const TargetSubtargetInfo &TSI, const MachineInstr *FirstMI, const MachineInstr &SecondMI)
      [all …]

AMDGPUMacroFusion.cpp
      23  /// Check if the instr pair, FirstMI and SecondMI, should be fused
      24  /// together. Given SecondMI, when FirstMI is unspecified, then check if
      28  const MachineInstr *FirstMI,
      40  if (!FirstMI)
      43  const MachineBasicBlock &MBB = *FirstMI->getParent();
      48  return FirstMI->definesRegister(Src2->getReg(), TRI);
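
The comment above states the contract every target fusion hook in this listing follows: SecondMI is always concrete, while FirstMI may be null, in which case the hook only answers whether SecondMI could end some fused pair at all. A minimal sketch of a hook with that shape, using invented MyTarget opcodes rather than real AMDGPU ones:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetSubtargetInfo.h"

    using namespace llvm;

    // Fictional opcode numbers standing in for real target opcodes.
    namespace MyTarget {
    enum : unsigned { PRODUCER = 1, CONSUMER = 2 };
    } // namespace MyTarget

    // Sketch of a target fusion hook following the FirstMI/SecondMI contract.
    static bool shouldScheduleAdjacentSketch(const TargetInstrInfo &TII,
                                             const TargetSubtargetInfo &TSI,
                                             const MachineInstr *FirstMI,
                                             const MachineInstr &SecondMI) {
      // Only this kind of second instruction can ever end a fused pair.
      if (SecondMI.getOpcode() != MyTarget::CONSUMER)
        return false;

      // FirstMI == nullptr asks "could SecondMI be fused with something?".
      // Answer optimistically; the scheduler asks again with a concrete FirstMI.
      if (!FirstMI)
        return true;

      // Both instructions are known: check the producer/consumer relationship.
      return FirstMI->getOpcode() == MyTarget::PRODUCER;
    }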

GCNVOPDUtils.h
      25  const MachineInstr &FirstMI,

SIWholeQuadMode.cpp
    1552  MachineInstr *FirstMI = &*MBB->begin();
    1557  if (DefInstr != FirstMI) {
    1561  MBB->insert(FirstMI, DefInstr);
    1566  FirstMI = &*std::next(FirstMI->getIterator());
    1576  auto BfeMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_BFE_U32), CountReg)
    1580  BuildMI(*MBB, FirstMI, DL,
    1584  auto CmpMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_CMP_EQ_U32))
    1588  BuildMI(*MBB, FirstMI, DL,

/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/

ARMMacroFusion.cpp
      22  static bool isAESPair(const MachineInstr *FirstMI,
      28  return FirstMI == nullptr || FirstMI->getOpcode() == ARM::AESE;
      31  return FirstMI == nullptr || FirstMI->getOpcode() == ARM::AESD;
      38  static bool isLiteralsPair(const MachineInstr *FirstMI,
      41  if ((FirstMI == nullptr || FirstMI->getOpcode() == ARM::MOVi16) &&
      48  /// Check if the instr pair, FirstMI and SecondMI, should be fused
      49  /// together. Given SecondMI, when FirstMI is unspecified, then check if
      53  shouldScheduleAdjacent(const TargetInstrInfo &TII, const TargetSubtargetInfo &TSI, const MachineInstr *FirstMI, const MachineInstr &SecondMI)
      [all …]

ARMLoadStoreOptimizer.cpp
    1006  const MachineInstr *FirstMI = MemOps[0].MI;
    1007  unsigned Opcode = FirstMI->getOpcode();
    1009  unsigned Size = getLSMultipleTransferSize(FirstMI);

/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/

RISCVMacroFusion.cpp

RISCVMakeCompressible.cpp
     281  static Register analyzeCompressibleUses(MachineInstr &FirstMI,
     284  MachineBasicBlock &MBB = *FirstMI.getParent();
     288  for (MachineBasicBlock::instr_iterator I = FirstMI.getIterator(),
     333  return RS.scavengeRegisterBackwards(*RCToScavenge, FirstMI.getIterator(),

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/

MachineInstrBundle.cpp
     109  static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI,
     111  for (auto MII = FirstMI; MII != LastMI; ++MII)
     124  MachineBasicBlock::instr_iterator FirstMI,
     126  assert(FirstMI != LastMI && "Empty bundle?");
     127  MIBundleBuilder Bundle(MBB, FirstMI, LastMI);
     134  BuildMI(MF, getDebugLoc(FirstMI, LastMI), TII->get(TargetOpcode::BUNDLE));
     146  for (auto MII = FirstMI; MII != LastMI; ++MII) {
     228  for (auto MII = FirstMI; MII != LastMI; ++MII) {
     243  MachineBasicBlock::instr_iterator FirstMI) {
     245  MachineBasicBlock::instr_iterator LastMI = std::next(FirstMI);
      [all …]
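
finalizeBundle() is the generic CodeGen helper behind several of these hits: it wraps the instruction range [FirstMI, LastMI) of a basic block in a BUNDLE pseudo-instruction and builds its operand list. The HexagonVLIWPacketizer.cpp hit below (line 1821) is a real caller; the following is a stripped-down sketch of the same calling pattern with all packetizing logic omitted:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBundle.h"

    // Sketch: glue the instructions in [First, Last) of MBB into one bundle.
    // finalizeBundle asserts on an empty range, so guard against that first.
    static void bundleRange(llvm::MachineBasicBlock &MBB,
                            llvm::MachineBasicBlock::instr_iterator First,
                            llvm::MachineBasicBlock::instr_iterator Last) {
      if (First == Last)
        return;
      llvm::finalizeBundle(MBB, First, Last);
    }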

XRayInstrumentation.cpp
     211  auto &FirstMI = *FirstMBB.begin();
     214  FirstMI.emitError("An attempt to perform XRay instrumentation for an"
     222  BuildMI(FirstMBB, FirstMI, FirstMI.getDebugLoc(),

VirtRegMap.cpp
     460  MachineInstr *FirstMI = MIs.back();
     490  MachineInstr *BundleStart = FirstMI;
     503  if (Indexes && BundledMI != FirstMI)

InlineSpiller.cpp
     274  static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg,
     276  if (!FirstMI.isBundled())
     277  return isCopyOf(FirstMI, Reg, TII);
     279  assert(!FirstMI.isBundledWithPred() && FirstMI.isBundledWithSucc() &&
     283  MachineBasicBlock::const_instr_iterator I = FirstMI.getIterator();

ModuloSchedule.cpp
    1313  MachineInstr *FirstMI = nullptr;
    1320  if (!FirstMI)
    1321  FirstMI = MI;
    1323  assert(FirstMI && "Failed to find first MI in schedule");
    1327  for (auto I = BB->getFirstNonPHI(); I != FirstMI->getIterator();) {

RegAllocGreedy.cpp
    1352  const MachineInstr &FirstMI,
    1356  (void)AnalyzeVirtRegInBundle(const_cast<MachineInstr &>(FirstMI), Reg, &Ops);

/freebsd/contrib/llvm-project/llvm/include/llvm/Target/

TargetMacroFusion.td
      21  // * const MachineInstr *FirstMI
      48  // Tie firstOpIdx and secondOpIdx. The operand of `FirstMI` at position
      69  //   if (!FirstMI)
      78  // Indicates that the destination register of `FirstMI` should have one use if
      88  //   const MachineInstr *FirstMI,
     107  //   const MachineInstr *FirstMI,
     113  //   /* Predicate for `FirstMI` */
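
TargetMacroFusion.td documents the TableGen side of macro fusion: a fusion record names predicates for FirstMI and SecondMI, and TableGen emits a C++ check with the same FirstMI/SecondMI signature as the hand-written hooks elsewhere in this listing. The skeleton below only illustrates the structure those comments describe; it is not generated output, and isFooFusion and the elided checks are placeholders:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetSubtargetInfo.h"

    using namespace llvm;

    // Illustrative skeleton of the predicate structure the .td comments describe.
    bool isFooFusion(const TargetInstrInfo &TII, const TargetSubtargetInfo &STI,
                     const MachineInstr *FirstMI, const MachineInstr &SecondMI) {
      // Predicate for `SecondMI`:
      //   bail out here if SecondMI cannot end this fusion pair.

      // If FirstMI was not given, everything that can be said about SecondMI
      // alone has already been said.
      if (!FirstMI)
        return true;

      // Predicate for `FirstMI`:
      //   bail out here if FirstMI cannot start this fusion pair.
      return true;
    }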

/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

X86MacroFusion.cpp
      32  /// Check if the instr pair, FirstMI and SecondMI, should be fused
      33  /// together. Given SecondMI, when FirstMI is unspecified, then check if
      37  const MachineInstr *FirstMI,
      50  if (FirstMI == nullptr)
      53  const X86::FirstMacroFusionInstKind TestKind = classifyFirst(*FirstMI);

/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/

HexagonStoreWidening.cpp
     314  MachineInstr *FirstMI = *Begin;
     315  assert(!FirstMI->memoperands_empty() && "Expecting some memory operands");
     316  const MachineMemOperand &FirstMMO = getStoreTarget(FirstMI);
     319  unsigned FirstOffset = getStoreOffset(FirstMI);
     340  OG.push_back(FirstMI);
     341  MachineInstr *S1 = FirstMI;

HexagonVLIWPacketizer.cpp
    1819  MachineBasicBlock::instr_iterator FirstMI(OldPacketMIs.front());
    1821  finalizeBundle(*MBB, FirstMI, LastMI);
    1822  auto BundleMII = std::prev(FirstMI);

/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/

MachineInstrBundle.h
      28  MachineBasicBlock::instr_iterator FirstMI,
      37  MachineBasicBlock::instr_iterator FirstMI);

MacroFusion.h
      29  /// Check if the instr pair, FirstMI and SecondMI, should be fused
      30  /// together. Given SecondMI, when FirstMI is unspecified, then check if
      34  const MachineInstr *FirstMI,
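
MacroFusion.h holds the target-independent glue: a target hands a shouldScheduleAdjacent-style predicate to the machine scheduler as a DAG mutation. The sketch below assumes the std::function-based createMacroFusionDAGMutation() overload found in many LLVM releases (newer trees take a list of plain predicate pointers instead) and uses a trivially false placeholder predicate:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MacroFusion.h"
    #include "llvm/CodeGen/ScheduleDAGMutation.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetSubtargetInfo.h"
    #include <memory>

    using namespace llvm;

    // Placeholder predicate: never requests fusion.  A real target would apply
    // its opcode checks here, as in the *MacroFusion.cpp files above.
    static bool neverFuse(const TargetInstrInfo &TII,
                          const TargetSubtargetInfo &TSI,
                          const MachineInstr *FirstMI,
                          const MachineInstr &SecondMI) {
      return false;
    }

    // Assumed wiring: wrap the predicate in a ScheduleDAGMutation that a
    // target's createMachineScheduler/getPostRAMutations can register.
    std::unique_ptr<ScheduleDAGMutation> makeExampleFusionMutation() {
      return createMacroFusionDAGMutation(neverFuse);
    }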

/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/

SystemZInstrInfo.cpp
     101  MachineInstr *FirstMI = HighPartMI;
     103  FirstMI->getOperand(0).setIsKill(false);
     122  FirstMI = LowPartMI;
     127  FirstMI->getOperand(1).setIsKill(false);
     128  FirstMI->getOperand(3).setIsKill(false);