Lines matching "early-to-mid" in MachineLICM.cpp

1 //===- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ----------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 // attempt to remove as much code from the body of a loop as possible.
12 // This pass is not intended to be a replacement or a complete alternative
13 // for the LLVM-IR-level LICM pass. It is only designed to hoist simple
16 //===----------------------------------------------------------------------===//
62 AvoidSpeculation("avoid-speculation",
67 HoistCheapInsts("hoist-cheap-insts",
72 HoistConstStores("hoist-const-stores",
76 static cl::opt<bool> HoistConstLoads("hoist-const-loads",
81 // is based on empirical data on a single target and is subject to tuning.
83 BlockFrequencyRatioThreshold("block-freq-ratio-threshold",
91 DisableHoistingToHotterBlocks("disable-hoisting-to-hotter-blocks",
92 cl::desc("Disable hoisting instructions to"
115 "Number of instructions not hoisted due to block frequency");
140 // Holds information about whether it is allowed to move load instructions
152 CurLoop->getExitBlocks(ExitBlocks); in isExitBlock()
165 // Register pressure on path leading from loop preheader to current BB.
180 // to hoist loads from this block.
181 // Tri-state: 0 - false, 1 - true, 2 - unknown
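The tri-state above caches a per-block answer to "may loads be hoisted out of this block?". A minimal standalone cache with that shape, keyed by a generic block pointer (names are illustrative, not the pass's actual fields):

#include <cstdint>
#include <unordered_map>

// 0 = no, 1 = yes, 2 = not computed yet (matches the tri-state convention).
enum class HoistableLoads : uint8_t { No = 0, Yes = 1, Unknown = 2 };

class LoadHoistCache {
  std::unordered_map<const void *, HoistableLoads> Cache; // keyed by block

public:
  HoistableLoads lookup(const void *Block) const {
    auto It = Cache.find(Block);
    return It == Cache.end() ? HoistableLoads::Unknown : It->second;
  }
  void set(const void *Block, bool Allowed) {
    Cache[Block] = Allowed ? HoistableLoads::Yes : HoistableLoads::No;
  }
};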
333 INITIALIZE_PASS_BEGIN(EarlyMachineLICM, "early-machinelicm", in INITIALIZE_PASS_DEPENDENCY()
334 "Early Machine Loop Invariant Code Motion", false, false) in INITIALIZE_PASS_DEPENDENCY()
339 INITIALIZE_PASS_END(EarlyMachineLICM, "early-machinelicm", in INITIALIZE_PASS_DEPENDENCY()
340 "Early Machine Loop Invariant Code Motion", false, false) in INITIALIZE_PASS_DEPENDENCY()
355 PreRegAlloc = MRI->isSSA(); in INITIALIZE_PASS_DEPENDENCY()
359 LLVM_DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: "); in INITIALIZE_PASS_DEPENDENCY()
361 LLVM_DEBUG(dbgs() << "******** Post-regalloc Machine LICM: "); in INITIALIZE_PASS_DEPENDENCY()
365 // Estimate register pressure during pre-regalloc pass. in INITIALIZE_PASS_DEPENDENCY()
366 unsigned NumRPS = TRI->getNumRegPressureSets(); in INITIALIZE_PASS_DEPENDENCY()
371 RegLimit[i] = TRI->getRegPressureSetLimit(MF, i); in INITIALIZE_PASS_DEPENDENCY()
384 SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end()); in INITIALIZE_PASS_DEPENDENCY()
394 MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader()); in INITIALIZE_PASS_DEPENDENCY()
404 /// Return true if instruction stores to the specified frame.
408 if (!MI->mayStore()) in InstructionStoresToFI()
411 // writes to all slots. in InstructionStoresToFI()
412 if (MI->memoperands_empty()) in InstructionStoresToFI()
414 for (const MachineMemOperand *MemOp : MI->memoperands()) { in InstructionStoresToFI()
415 if (!MemOp->isStore() || !MemOp->getPseudoValue()) in InstructionStoresToFI()
418 dyn_cast<FixedStackPseudoSourceValue>(MemOp->getPseudoValue())) { in InstructionStoresToFI()
419 if (Value->getFrameIndex() == FI) in InstructionStoresToFI()
429 // FIXME: This intentionally works in reverse due to some issues with the in applyBitsNotInRegMaskToRegUnitsMask()
432 // This is used to apply callee-saved-register masks to the clobbered regunits in applyBitsNotInRegMaskToRegUnitsMask()
435 // The right way to approach this is to start with a BitVector full of ones, in applyBitsNotInRegMaskToRegUnitsMask()
442 // What we have to do for now is the opposite: we have to assume that the in applyBitsNotInRegMaskToRegUnitsMask()
447 // This is to work around an issue which appears in AArch64, but isn't in applyBitsNotInRegMaskToRegUnitsMask()
448 // exclusive to that target: AArch64's Qn registers (128 bits) have Dn in applyBitsNotInRegMaskToRegUnitsMask()
457 // should have an extra RegUnit to model the "unknown" bits not covered by the in applyBitsNotInRegMaskToRegUnitsMask()
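The comment run above describes combining a register mask with a set of clobbered register units: a clear bit in the mask means the register is not preserved, so every unit it covers must be marked clobbered. A generic sketch of that inversion, using std::bitset and a made-up one-to-one register-to-unit mapping instead of LLVM's regunit tables:

#include <bitset>

constexpr unsigned NumRegs = 64;          // illustrative sizes only
constexpr unsigned NumRegUnits = NumRegs; // pretend one unit per register

// PreservedMask has bit R set when register R survives the call; a clear bit
// means the register, and therefore each of its units, is clobbered.
void applyBitsNotInRegMask(std::bitset<NumRegUnits> &ClobberedUnits,
                           const std::bitset<NumRegs> &PreservedMask) {
  for (unsigned Reg = 0; Reg != NumRegs; ++Reg) {
    if (PreservedMask.test(Reg))
      continue;               // preserved: leave its unit alone
    ClobberedUnits.set(Reg);  // 1:1 register-to-unit mapping in this sketch
  }
}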
489 for (const MachineOperand &MO : MI->operands()) { in ProcessMI()
491 // Remember if the instruction stores to the frame index. in ProcessMI()
494 MFI->isSpillSlotObjectIndex(FI) && in ProcessMI()
518 // If it's using a non-loop-invariant register, then it's obviously in ProcessMI()
519 // not safe to hoist. in ProcessMI()
533 // Non-dead implicit def? This cannot be hoisted. in ProcessMI()
535 // No need to check if a dead implicit def is also defined by in ProcessMI()
569 (TII->isLoadFromStackSlot(*MI, FI) && MFI->isSpillSlotObjectIndex(FI))) in ProcessMI()
574 /// Walk the specified region of the CFG and hoist loop invariants out to the
582 unsigned NumRegUnits = TRI->getNumRegUnits(); in HoistRegionPostRA()
591 for (MachineBasicBlock *BB : CurLoop->getBlocks()) { in HoistRegionPostRA()
593 // then don't try to hoist instructions out of this loop. in HoistRegionPostRA()
594 const MachineLoop *ML = MLI->getLoopFor(BB); in HoistRegionPostRA()
595 if (ML && ML->getHeader()->isEHPad()) continue; in HoistRegionPostRA()
 597     // Conservatively treat live-ins as an external def. in HoistRegionPostRA()
600 for (const auto &LI : BB->liveins()) { in HoistRegionPostRA()
606 if (const uint32_t *Mask = BB->getBeginClobberMask(TRI)) in HoistRegionPostRA()
616 MachineBasicBlock::iterator TI = Preheader->getFirstTerminator(); in HoistRegionPostRA()
617 if (TI != Preheader->end()) { in HoistRegionPostRA()
618 for (const MachineOperand &MO : TI->operands()) { in HoistRegionPostRA()
655 for (const MachineOperand &MO : MI->all_uses()) { in HoistRegionPostRA()
660 // If it's using a non-loop-invariant register, then it's obviously in HoistRegionPostRA()
661 // not safe to hoist. in HoistRegionPostRA()
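Post-RA, "loop invariant" boils down to: none of the candidate's used register units are defined or clobbered anywhere in the loop body. A compressed sketch of that test with plain bitsets (the real pass additionally tracks multiple defs, implicit defs, and terminator uses):

#include <bitset>

constexpr unsigned Units = 64; // illustrative regunit count

// Safe to hoist (with respect to register uses) only if the used units are
// disjoint from everything the loop defines or clobbers.
bool usesAreLoopInvariant(const std::bitset<Units> &UsedUnits,
                          const std::bitset<Units> &DefsInLoop,
                          const std::bitset<Units> &ClobbersInLoop) {
  return (UsedUnits & (DefsInLoop | ClobbersInLoop)).none();
}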
676 /// Add register 'Reg' to the livein sets of BBs in the current loop, and make
679 for (MachineBasicBlock *BB : CurLoop->getBlocks()) { in AddToLiveIns()
680 if (!BB->isLiveIn(Reg)) in AddToLiveIns()
681 BB->addLiveIn(Reg); in AddToLiveIns()
686 if (TRI->regsOverlap(Reg, MO.getReg())) in AddToLiveIns()
 693 /// When an instruction is found to use only loop invariant operands that are
 694 /// safe to hoist, this function is called to do the dirty work.
700 // Now move the instructions to the predecessor, inserting it before any in HoistPostRA()
702 LLVM_DEBUG(dbgs() << "Hoisting to " << printMBBReference(*Preheader) in HoistPostRA()
703 << " from " << printMBBReference(*MI->getParent()) << ": " in HoistPostRA()
706 // Splice the instruction to the preheader. in HoistPostRA()
707 MachineBasicBlock *MBB = MI->getParent(); in HoistPostRA()
708 Preheader->splice(Preheader->getFirstTerminator(), MBB, MI); in HoistPostRA()
713 assert(!MI->isDebugInstr() && "Should not hoist debug inst"); in HoistPostRA()
714 MI->setDebugLoc(DebugLoc()); in HoistPostRA()
716 // Add register to livein list to all the BBs in the current loop since a in HoistPostRA()
718 // important to ensure later passes do not scavenge the def register. in HoistPostRA()
725 /// Check if this mbb is guaranteed to execute. If not then a load from this mbb
726 /// may not be safe to hoist.
732 if (BB != CurLoop->getHeader()) { in IsGuaranteedToExecute()
735 CurLoop->getExitingBlocks(CurrentLoopExitingBlocks); in IsGuaranteedToExecute()
737 if (!DT->dominates(BB, CurrentLoopExitingBlock)) { in IsGuaranteedToExecute()
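The dominance test above encodes the rule that a non-header block only runs on every iteration if it dominates every exiting block of the loop. A standalone restatement, with the dominance query passed in as a callback (a stand-in for the machine dominator tree):

#include <functional>
#include <vector>

template <typename BlockT>
bool isGuaranteedToExecute(
    const BlockT *Block, const BlockT *Header,
    const std::vector<const BlockT *> &ExitingBlocks,
    const std::function<bool(const BlockT *, const BlockT *)> &Dominates) {
  if (Block == Header)
    return true;                    // the header runs on every iteration
  for (const BlockT *Exiting : ExitingBlocks)
    if (!Dominates(Block, Exiting))
      return false;                 // an iteration may leave before Block runs
  return true;
}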
749 /// rematerialize it in this scenario. In that case we do not want to hoist such
753 if (!TII->isTriviallyReMaterializable(MI)) in isTriviallyReMaterializable()
776 /// Destroy scope for the MBB that corresponds to the given dominator tree node
 777 /// if it's a leaf or all of its children are done. Walk up the dominator tree to
786 ExitScope(Node->getBlock()); in ExitScopeIfDone()
787 // Now traverse upwards to pop ancestors whose offsprings are all done. in ExitScopeIfDone()
789 if (!Parent || --OpenChildren[Parent] != 0) in ExitScopeIfDone()
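ExitScopeIfDone closes the scope of a finished node and then walks toward the root, closing each ancestor whose open-children counter drops to zero. A sketch of that counter walk over a simple parent-linked node (simplified, hypothetical types):

#include <unordered_map>

struct Node {
  Node *Parent = nullptr;
};

void exitScopeIfDone(Node *N,
                     std::unordered_map<Node *, unsigned> &OpenChildren,
                     void (*ExitScope)(Node *)) {
  ExitScope(N); // N's own subtree is finished.
  // Pop ancestors whose children have now all been processed.
  while (Node *Parent = N->Parent) {
    if (--OpenChildren[Parent] != 0)
      break;
    ExitScope(Parent);
    N = Parent;
  }
}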
797 /// order w.r.t the DominatorTree. This allows us to visit definitions before
798 /// uses, allowing us to hoist a loop body in one pass without iteration.
811 // Perform a DFS walk to determine the order of visit. in HoistOutOfLoop()
816 MachineBasicBlock *BB = Node->getBlock(); in HoistOutOfLoop()
819 // then don't try to hoist instructions out of this loop. in HoistOutOfLoop()
820 const MachineLoop *ML = MLI->getLoopFor(BB); in HoistOutOfLoop()
821 if (ML && ML->getHeader()->isEHPad()) in HoistOutOfLoop()
825 if (!CurLoop->contains(BB)) in HoistOutOfLoop()
829 unsigned NumChildren = Node->getNumChildren(); in HoistOutOfLoop()
832 // code to be hoisted that wasn't going to be executed, and increases in HoistOutOfLoop()
833 // register pressure in a situation where it's likely to matter. in HoistOutOfLoop()
834 if (BB->succ_size() >= 25) in HoistOutOfLoop()
842 for (MachineDomTreeNode *Child : reverse(Node->children())) { in HoistOutOfLoop()
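The walk above is an iterative pre-order traversal of the dominator tree: children are pushed in reverse so they pop in their original order, and because dominators are visited before the blocks they dominate, definitions are seen before their uses and the loop body can be hoisted in one pass. The same pattern over a bare tree type:

#include <vector>

struct DomNode {
  std::vector<DomNode *> Children;
};

// Pre-order DFS with an explicit worklist; reversing the child push order
// makes children pop in source order.
std::vector<DomNode *> preorder(DomNode *Root) {
  std::vector<DomNode *> Order, Worklist{Root};
  while (!Worklist.empty()) {
    DomNode *N = Worklist.back();
    Worklist.pop_back();
    Order.push_back(N);
    for (auto It = N->Children.rbegin(), E = N->Children.rend(); It != E; ++It)
      Worklist.push_back(*It);
  }
  return Order;
}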
859 MachineBasicBlock *MBB = Node->getBlock(); in HoistOutOfLoop()
869 // We have failed to hoist MI to outermost loop's preheader. If MI is in in HoistOutOfLoop()
870 // a subloop, try to hoist it to subloop's preheader. in HoistOutOfLoop()
872 for (MachineLoop *L = MLI->getLoopFor(MI.getParent()); L != CurLoop; in HoistOutOfLoop()
873 L = L->getParentLoop()) in HoistOutOfLoop()
878 MachineBasicBlock *InnerLoopPreheader = InnerLoop->getLoopPreheader(); in HoistOutOfLoop()
893 // If it's a leaf node, it's done. Traverse upwards to pop ancestors. in HoistOutOfLoop()
899 return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg()); in isOperandKill()
902 /// Find all virtual register references that are liveout of the preheader to
911 // the critical edge from the loop predecessor to the loop header. in InitRegPressure()
912 if (BB->pred_size() == 1) { in InitRegPressure()
915 if (!TII->analyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty()) in InitRegPressure()
916 InitRegPressure(*BB->pred_begin()); in InitRegPressure()
929 if (static_cast<int>(RegPressure[Class]) < -RPIdAndCost.second) in UpdateRegPressure()
939 /// If 'ConsiderSeen' is true, updates 'RegSeen' and uses the information to
940 /// figure out which usages are live-ins.
941 /// FIXME: Figure out a way to consider 'RegSeen' from all code paths.
946 if (MI->isImplicitDef()) in calcRegisterCost()
948 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) { in calcRegisterCost()
949 const MachineOperand &MO = MI->getOperand(i); in calcRegisterCost()
956 // FIXME: It seems bad to use RegSeen only for some of these calculations. in calcRegisterCost()
958 const TargetRegisterClass *RC = MRI->getRegClass(Reg); in calcRegisterCost()
960 RegClassWeight W = TRI->getRegClassWeight(RC); in calcRegisterCost()
970 RCCost = -W.RegWeight; in calcRegisterCost()
974 const int *PS = TRI->getRegClassPressureSets(RC); in calcRegisterCost()
975 for (; *PS != -1; ++PS) { in calcRegisterCost()
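calcRegisterCost charges each new definition with its register class weight and credits the same weight when an operand is a kill (the last use), accumulating the deltas per register pressure set. A simplified, standalone version of that bookkeeping (pressure-set ids and weights are illustrative):

#include <map>
#include <vector>

struct OperandInfo {
  unsigned PressureSet; // pressure set fed by the operand's register class
  int Weight;           // register class weight within that set
  bool IsDef;           // a definition adds pressure...
  bool IsKill;          // ...a killed last use releases it
};

std::map<unsigned, int> calcRegisterCost(const std::vector<OperandInfo> &Ops) {
  std::map<unsigned, int> Cost;
  for (const OperandInfo &Op : Ops) {
    if (Op.IsDef)
      Cost[Op.PressureSet] += Op.Weight;
    else if (Op.IsKill)
      Cost[Op.PressureSet] -= Op.Weight;
  }
  return Cost;
}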
996 if (const PseudoSourceValue *PSV = MemOp->getPseudoValue()) in mayLoadFromGOTOrConstantPool()
997 if (PSV->isGOT() || PSV->isConstantPool()) in mayLoadFromGOTOrConstantPool()
1008 // A future improvement can be to check if the store registers are constant
1019 // Check that all register operands are caller-preserved physical registers. in isInvariantStore()
1026 Reg = TRI->lookThruCopyLike(MO.getReg(), MRI); in isInvariantStore()
1029 if (!TRI->isCallerPreservedPhysReg(Reg.asMCReg(), *MI.getMF())) in isInvariantStore()
1041 // store instruction. This means that the src of the copy has to satisfy
1048 // FIXME: If targets would like to look through instructions that aren't in isCopyFeedingInvariantStore()
1049 // pure copies, this can be updated to a query. in isCopyFeedingInvariantStore()
1059 if (!TRI->isCallerPreservedPhysReg(CopySrcReg.asMCReg(), *MF)) in isCopyFeedingInvariantStore()
1066 for (MachineInstr &UseMI : MRI->use_instructions(CopyDstReg)) { in isCopyFeedingInvariantStore()
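isInvariantStore and isCopyFeedingInvariantStore only accept stores whose source, possibly traced through copies, is a caller-preserved physical register, since such a value cannot change inside the loop. A minimal sketch of the operand screen only, with the caller-preserved query passed in as a callback and the copy chasing left out:

#include <functional>
#include <vector>

bool allSourcesCallerPreserved(
    const std::vector<unsigned> &RegOperands,
    const std::function<bool(unsigned)> &IsCallerPreserved) {
  if (RegOperands.empty())
    return false;              // require at least one register source
  for (unsigned Reg : RegOperands)
    if (!IsCallerPreserved(Reg))
      return false;
  return true;
}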
1074 /// e.g. If the instruction is a call, then it's obviously not safe to hoist it.
1076 // Check if it's safe to move the instruction. in IsLICMCandidate()
1080 LLVM_DEBUG(dbgs() << "LICM: Instruction not safe to move.\n"); in IsLICMCandidate()
1084 // If it is a load then check if it is guaranteed to execute by making sure in IsLICMCandidate()
1087 // Loads from constant memory are safe to speculate, for example indexed load in IsLICMCandidate()
1092 LLVM_DEBUG(dbgs() << "LICM: Load not guaranteed to execute.\n"); in IsLICMCandidate()
1096 // Convergent attribute has been used on operations that involve inter-thread in IsLICMCandidate()
1098 // control flows. It is not safe to hoist or sink such operations across in IsLICMCandidate()
1103 if (!TII->shouldHoist(I, CurLoop)) in IsLICMCandidate()
1116 return CurLoop->isLoopInvariant(I); in IsLoopInvariantInst()
1120 /// it could cause a copy to be inserted.
1126 for (const MachineOperand &MO : MI->all_defs()) { in HasLoopPHIUse()
1130 for (MachineInstr &UseMI : MRI->use_instructions(Reg)) { in HasLoopPHIUse()
1131 // A PHI may cause a copy to be inserted. in HasLoopPHIUse()
1135 if (CurLoop->contains(&UseMI)) in HasLoopPHIUse()
1137 // A PHI in an exit block can cause a copy to be inserted if the PHI in HasLoopPHIUse()
1145 if (UseMI.isCopy() && CurLoop->contains(&UseMI)) in HasLoopPHIUse()
1158 if (MRI->use_nodbg_empty(Reg)) in HasHighOperandLatency()
1161 for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) { in HasHighOperandLatency()
1164 if (!CurLoop->contains(UseMI.getParent())) in HasHighOperandLatency()
1174 if (TII->hasHighOperandLatency(SchedModel, MRI, MI, DefIdx, UseMI, i)) in HasHighOperandLatency()
1188 if (TII->isAsCheapAsAMove(MI) || MI.isCopyLike()) in IsCheapInstruction()
1197 --NumDefs; in IsCheapInstruction()
1202 if (!TII->hasLowDefLatency(SchedModel, MI, i)) in IsCheapInstruction()
1210 /// Visit BBs from header to current BB, check if hoisting an instruction of the
1235 /// Traverse the back trace from header to the current block and update their
1236 /// register pressures to reflect the effect of hoisting MI from the current
1237 /// block to the preheader.
1240 // to register pressure. in UpdateBackTraceRegPressure()
1244 // Update register pressure of blocks from loop header to current block. in UpdateBackTraceRegPressure()
1250 /// Return true if it is potentially profitable to hoist the given loop
1260 // - The value defined by the instruction becomes live across the entire in IsProfitableToHoist()
1263 // - If the value is used by a PHI in the loop, a copy will be required for in IsProfitableToHoist()
1266 // - When hoisting the last use of a value in the loop, that value no longer in IsProfitableToHoist()
1267 // needs to be live in the loop. This lowers register pressure in the loop. in IsProfitableToHoist()
1286 // FIXME: If there are long latency loop-invariant instructions inside the in IsProfitableToHoist()
1302 // Estimate register pressure to determine whether to LICM the instruction. in IsProfitableToHoist()
1311 // Visit BBs from header to current BB, if hoisting this doesn't cause in IsProfitableToHoist()
1312 // high register pressure, then it's safe to proceed. in IsProfitableToHoist()
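The pressure check walks the blocks from the loop header down to the current block and rejects the hoist if adding the instruction's cost would push any pressure set over its limit in any of them. A sketch of the per-block test under that reading (the real pass also weighs cheapness, latency, and PHI-induced copies):

#include <map>
#include <vector>

// True if CurrentPressure plus the positive parts of Cost stays within Limit
// for every pressure set; negative deltas (released registers) never hurt.
bool fitsRegisterPressure(const std::map<unsigned, int> &Cost,
                          const std::vector<int> &CurrentPressure,
                          const std::vector<int> &Limit) {
  for (const auto &[Set, Delta] : Cost) {
    if (Delta <= 0)
      continue;
    if (CurrentPressure[Set] + Delta > Limit[Set])
      return false;
  }
  return true;
}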
1314 LLVM_DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI); in IsProfitableToHoist()
1326 // instruction is not guaranteed to be executed in the loop, it's best to be in IsProfitableToHoist()
1334 // If we have a COPY with other uses in the loop, hoist to allow the users to in IsProfitableToHoist()
1343 MRI->isConstantPhysReg(UseOp.getReg()); in IsProfitableToHoist()
1346 any_of(MRI->use_nodbg_instructions(DefReg), in IsProfitableToHoist()
1348 if (!CurLoop->contains(&UseMI)) in IsProfitableToHoist()
1352 // high RP we're fine to hoist it even if the user can't be in IsProfitableToHoist()
1353           // hoisted later. Otherwise we want to check the user if it's in IsProfitableToHoist()
1356 !CurLoop->isLoopInvariant(UseMI, DefReg)) in IsProfitableToHoist()
1365 // to be remat'ed. in IsProfitableToHoist()
1368 LLVM_DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI); in IsProfitableToHoist()
1381 if (MI->canFoldAsLoad()) in ExtractHoistableLoad()
1384 // If not, we may be able to unfold a load and hoist that. in ExtractHoistableLoad()
1387 if (!MI->isDereferenceableInvariantLoad()) in ExtractHoistableLoad()
1393 TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(), in ExtractHoistableLoad()
1398   const MCInstrDesc &MID = TII->get(NewOpc); in ExtractHoistableLoad()
1399 MachineFunction &MF = *MI->getMF(); in ExtractHoistableLoad()
1400 const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF); in ExtractHoistableLoad()
1402 Register Reg = MRI->createVirtualRegister(RC); in ExtractHoistableLoad()
1405 bool Success = TII->unfoldMemoryOperand(MF, *MI, Reg, in ExtractHoistableLoad()
1414 MachineBasicBlock *MBB = MI->getParent(); in ExtractHoistableLoad()
1416 MBB->insert(Pos, NewMIs[0]); in ExtractHoistableLoad()
1417 MBB->insert(Pos, NewMIs[1]); in ExtractHoistableLoad()
1418 // If unfolding produced a load that wasn't loop-invariant or profitable to in ExtractHoistableLoad()
1422 NewMIs[0]->eraseFromParent(); in ExtractHoistableLoad()
1423 NewMIs[1]->eraseFromParent(); in ExtractHoistableLoad()
1433 if (MI->shouldUpdateCallSiteInfo()) in ExtractHoistableLoad()
1436 MI->eraseFromParent(); in ExtractHoistableLoad()
1451 SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end()); in InitializeLoadsHoistableLoops()
1455 // pre-order DFS. in InitializeLoadsHoistableLoops()
1460 Worklist.insert(Worklist.end(), L->getSubLoops().begin(), in InitializeLoadsHoistableLoops()
1461 L->getSubLoops().end()); in InitializeLoadsHoistableLoops()
1464 // Going from the innermost to outermost loops, check if a loop has in InitializeLoadsHoistableLoops()
1466 // found, mark this loop and its parent as non-hoistable and continue in InitializeLoadsHoistableLoops()
1468   // Visiting in reverse pre-order DFS manner in InitializeLoadsHoistableLoops()
1469 // allows us to not process all the instructions of the outer loop if the in InitializeLoadsHoistableLoops()
1470 // inner loop is proved to be non-load-hoistable. in InitializeLoadsHoistableLoops()
1472 for (auto *MBB : Loop->blocks()) { in InitializeLoadsHoistableLoops()
1473 // If this loop has already been marked as non-hoistable, skip it. in InitializeLoadsHoistableLoops()
1480 for (MachineLoop *L = Loop; L != nullptr; L = L->getParentLoop()) in InitializeLoadsHoistableLoops()
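When some block makes load hoisting unsafe, the loop and every enclosing loop are marked non-hoistable at once, so the reversed pre-order visit never has to rescan an outer loop whose inner loop already failed. The upward marking over a parent chain, sketched with a bare Loop type:

#include <unordered_set>

struct Loop {
  Loop *Parent = nullptr;
};

// An unsafe block in an inner loop also belongs to every outer loop's body,
// so all ancestors inherit the "no load hoisting" verdict.
void markLoadsNotHoistable(Loop *L, std::unordered_set<Loop *> &NotHoistable) {
  for (; L != nullptr; L = L->Parent)
    NotHoistable.insert(L);
}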
1494 if (TII->produceSameValue(*MI, *PrevMI, (PreRegAlloc ? MRI : nullptr))) in LookForDuplicate()
1502 /// the existing instruction rather than hoisting the instruction to the
1509 if (MI->isImplicitDef()) in EliminateCSE()
1514 if (MI->mayLoad() && !MI->isDereferenceableInvariantLoad()) in EliminateCSE()
1517 if (MachineInstr *Dup = LookForDuplicate(MI, CI->second)) { in EliminateCSE()
1523 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { in EliminateCSE()
1524 const MachineOperand &MO = MI->getOperand(i); in EliminateCSE()
1528 MO.getReg() == Dup->getOperand(i).getReg()) && in EliminateCSE()
1538 Register Reg = MI->getOperand(Idx).getReg(); in EliminateCSE()
1539 Register DupReg = Dup->getOperand(Idx).getReg(); in EliminateCSE()
1540 OrigRCs.push_back(MRI->getRegClass(DupReg)); in EliminateCSE()
1542 if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) { in EliminateCSE()
1545 MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]); in EliminateCSE()
1551 Register Reg = MI->getOperand(Idx).getReg(); in EliminateCSE()
1552 Register DupReg = Dup->getOperand(Idx).getReg(); in EliminateCSE()
1553 MRI->replaceRegWith(Reg, DupReg); in EliminateCSE()
1554 MRI->clearKillFlags(DupReg); in EliminateCSE()
1556 if (!MRI->use_nodbg_empty(DupReg)) in EliminateCSE()
1557 Dup->getOperand(Idx).setIsDead(false); in EliminateCSE()
1560 MI->eraseFromParent(); in EliminateCSE()
1570 if (MI->mayLoad() && !MI->isDereferenceableInvariantLoad()) in MayCSE()
1573 unsigned Opcode = MI->getOpcode(); in MayCSE()
1576 if (DT->dominates(Map.first, MI->getParent())) { in MayCSE()
1581 if (CI == Map.second.end() || MI->isImplicitDef()) in MayCSE()
1583 if (LookForDuplicate(MI, CI->second) != nullptr) in MayCSE()
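The CSE map is keyed by opcode and holds previously hoisted candidates; a newly hoisted instruction is dropped in favor of a dominating duplicate that produces the same value. A minimal sketch of the lookup side, with the value-equality test passed in as a callback (a stand-in for produceSameValue):

#include <functional>
#include <map>
#include <vector>

template <typename InstrT>
InstrT *lookForDuplicate(
    InstrT *MI, unsigned Opcode,
    const std::map<unsigned, std::vector<InstrT *>> &CSEMap,
    const std::function<bool(InstrT *, InstrT *)> &ProduceSameValue) {
  auto It = CSEMap.find(Opcode);
  if (It == CSEMap.end())
    return nullptr;
  for (InstrT *Prev : It->second)
    if (ProduceSameValue(MI, Prev))
      return Prev;             // reuse Prev's result instead of hoisting MI
  return nullptr;
}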
1591 /// When an instruction is found to use only loop invariant operands
1592 /// that are safe to hoist, this function is called to do the dirty work.
1596 MachineBasicBlock *SrcBlock = MI->getParent(); in Hoist()
1598 // Disable the instruction hoisting due to block hotness in Hoist()
1618 if (MI->mayStore()) in Hoist()
1621 // Now move the instructions to the predecessor, inserting it before any in Hoist()
1625 if (MI->getParent()->getBasicBlock()) in Hoist()
1626 dbgs() << " from " << printMBBReference(*MI->getParent()); in Hoist()
1627 if (Preheader->getBasicBlock()) in Hoist()
1628 dbgs() << " to " << printMBBReference(*Preheader); in Hoist()
1632 // If this is the first instruction being hoisted to the preheader, in Hoist()
1639 // Look for opportunity to CSE the hoisted instruction. in Hoist()
1640 unsigned Opcode = MI->getOpcode(); in Hoist()
1644 if (DT->dominates(Map.first, MI->getParent())) { in Hoist()
1657 // Otherwise, splice the instruction to the preheader. in Hoist()
1658 Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI); in Hoist()
1663 assert(!MI->isDebugInstr() && "Should not hoist debug inst"); in Hoist()
1664 MI->setDebugLoc(DebugLoc()); in Hoist()
1666 // Update register pressure for BBs from header to this block. in Hoist()
1670 // since they may need to be live throughout the entire loop in Hoist()
1672 for (MachineOperand &MO : MI->all_defs()) in Hoist()
1674 MRI->clearKillFlags(MO.getReg()); in Hoist()
1691 // Determine the block to which to hoist instructions. If we can't find a in getCurPreheader()
1694 // If we've tried to get a preheader and failed, don't try again. in getCurPreheader()
1695 if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1)) in getCurPreheader()
1699 CurPreheader = CurLoop->getLoopPreheader(); in getCurPreheader()
1701 MachineBasicBlock *Pred = CurLoop->getLoopPredecessor(); in getCurPreheader()
1703 CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1); in getCurPreheader()
1707 CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), *this); in getCurPreheader()
1709 CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1); in getCurPreheader()
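getCurPreheader caches its result and uses a -1 sentinel so a failed lookup is never retried; when the loop has no preheader it tries to manufacture one by splitting the critical edge from the loop predecessor to the header. The caching shape in isolation, with the loop and CFG queries passed in as callbacks:

#include <functional>

template <typename BlockT>
BlockT *getCurPreheader(BlockT *&Cache,
                        const std::function<BlockT *()> &GetPreheader,
                        const std::function<BlockT *()> &SplitCriticalEdge) {
  BlockT *const Failed = reinterpret_cast<BlockT *>(-1); // "tried and failed"
  if (Cache == Failed)
    return nullptr;                 // don't retry a known failure
  if (!Cache) {
    Cache = GetPreheader();         // use an existing preheader if there is one
    if (!Cache)
      Cache = SplitCriticalEdge();  // otherwise try to create one
    if (!Cache) {
      Cache = Failed;               // remember that we already gave up
      return nullptr;
    }
  }
  return Cache;
}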
1722 uint64_t SrcBF = MBFI->getBlockFreq(SrcBlock).getFrequency(); in isTgtHotterThanSrc()
1723 uint64_t DstBF = MBFI->getBlockFreq(TgtBlock).getFrequency(); in isTgtHotterThanSrc()
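isTgtHotterThanSrc compares the block frequency of the hoist target (the preheader) with that of the instruction's current block; with disable-hoisting-to-hotter-blocks enabled, the hoist is skipped when the target exceeds the source by more than block-freq-ratio-threshold. One way to write that guard, as a hedged sketch rather than the pass's exact formula:

#include <cstdint>

// True when TgtFreq is more than RatioThreshold times SrcFreq. A zero source
// frequency is treated as "any nonzero target is hotter"; overflow handling is
// omitted for brevity.
bool isTgtHotterThanSrc(uint64_t SrcFreq, uint64_t TgtFreq,
                        uint64_t RatioThreshold) {
  if (SrcFreq == 0)
    return TgtFreq > 0;
  return TgtFreq > SrcFreq * RatioThreshold;
}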