//===- ShrinkWrap.cpp - Compute safe point for prolog/epilog insertion ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass looks for safe points where the prologue and epilogue can be
// inserted.
// The safe point for the prologue (resp. epilogue) is called Save
// (resp. Restore).
// A point is safe for the prologue (resp. epilogue) if and only if
// it 1) dominates (resp. post-dominates) all the frame-related operations and
// 2) between two executions of the Save (resp. Restore) point there is an
// execution of the Restore (resp. Save) point.
//
// For instance, the following points are safe:
// for (int i = 0; i < 10; ++i) {
//   Save
//   ...
//   Restore
// }
// Indeed, the execution looks like Save -> Restore -> Save -> Restore ...
// And the following points are not:
// for (int i = 0; i < 10; ++i) {
//   Save
//   ...
// }
// for (int i = 0; i < 10; ++i) {
//   ...
//   Restore
// }
// Indeed, the execution looks like Save -> Save -> ... -> Restore -> Restore.
//
// This pass also ensures that the safe points are 3) cheaper than the regular
// entry and exit blocks.
//
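// An illustrative sketch of property #3 (not taken from any particular
// target): if all the frame-related code sits under a cold branch,
//   void f(int x) {
//     if (x) { /* spills, calls, frame accesses */ }
//   }
// the branch target is a profitable Save point only as long as its block
// frequency does not exceed the entry block's.
//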
40 // MachinePostDominatorTree.
41 // Property #2 is ensured via property #1 and MachineLoopInfo, i.e., both
42 // points must be in the same loop.
43 // Property #3 is ensured via the MachineBlockFrequencyInfo.
44 //
45 // If this pass found points matching all these properties, then
46 // MachineFrameInfo is updated with this information.
47 //
48 //===----------------------------------------------------------------------===//
49 
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <memory>

using namespace llvm;

#define DEBUG_TYPE "shrink-wrap"

STATISTIC(NumFunc, "Number of functions");
STATISTIC(NumCandidates, "Number of shrink-wrapping candidates");
STATISTIC(NumCandidatesDropped,
          "Number of shrink-wrapping candidates dropped because of frequency");

static cl::opt<cl::boolOrDefault>
EnableShrinkWrapOpt("enable-shrink-wrap", cl::Hidden,
                    cl::desc("enable the shrink-wrapping pass"));
static cl::opt<bool> EnablePostShrinkWrapOpt(
    "enable-shrink-wrap-region-split", cl::init(true), cl::Hidden,
    cl::desc("enable splitting of the restore block if possible"));
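// Both options are hidden from -help. -enable-shrink-wrap overrides the
// target's preference (see isShrinkWrapEnabled below), while
// -enable-shrink-wrap-region-split gates postShrinkWrapping.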

namespace {

/// Class to determine where the safe points to insert the
/// prologue and epilogue are.
/// Unlike the paper from Fred C. Chow, PLDI'88, which introduced the
/// shrink-wrapping term for prologue/epilogue placement, this pass
/// does not rely on expensive data-flow analysis. Instead we use the
/// dominance properties and loop information to decide which points
/// are safe for such insertion.
class ShrinkWrap : public MachineFunctionPass {
  /// Hold callee-saved information.
  RegisterClassInfo RCI;
  MachineDominatorTree *MDT = nullptr;
  MachinePostDominatorTree *MPDT = nullptr;

  /// Current safe point found for the prologue.
  /// The prologue will be inserted before the first instruction
  /// in this basic block.
  MachineBasicBlock *Save = nullptr;

  /// Current safe point found for the epilogue.
  /// The epilogue will be inserted before the first terminator instruction
  /// in this basic block.
  MachineBasicBlock *Restore = nullptr;

  /// Hold the basic block frequency information.
  /// Used to check the profitability of the new points.
  MachineBlockFrequencyInfo *MBFI = nullptr;

  /// Hold the loop information. Used to determine if Save and Restore
  /// are in the same loop.
  MachineLoopInfo *MLI = nullptr;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  /// Frequency of the Entry block.
  BlockFrequency EntryFreq;

  /// Current opcode for frame setup.
  unsigned FrameSetupOpcode = ~0u;

  /// Current opcode for frame destroy.
  unsigned FrameDestroyOpcode = ~0u;

  /// Stack pointer register, used by llvm.{savestack,restorestack}.
  Register SP;

  /// Entry block.
  const MachineBasicBlock *Entry = nullptr;

  using SetOfRegs = SmallSetVector<unsigned, 16>;

  /// Registers that need to be saved for the current function.
  mutable SetOfRegs CurrentCSRs;

  /// Current MachineFunction.
  MachineFunction *MachineFunc = nullptr;

  /// Is `true` for the block numbers where we assume possible stack accesses
  /// or computation of stack-relative addresses on any CFG path including the
  /// block itself. Is `false` for basic blocks where we can guarantee the
  /// opposite. False positives won't lead to incorrect analysis results,
  /// therefore this conservative approximation is sound.
  BitVector StackAddressUsedBlockInfo;

  /// Check if \p MI uses or defines a callee-saved register or
  /// a frame index. If this is the case, this means \p MI must happen
  /// after Save and before Restore.
  bool useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS,
                       bool StackAddressUsed) const;

  const SetOfRegs &getCurrentCSRs(RegScavenger *RS) const {
    if (CurrentCSRs.empty()) {
      BitVector SavedRegs;
      const TargetFrameLowering *TFI =
          MachineFunc->getSubtarget().getFrameLowering();

      TFI->determineCalleeSaves(*MachineFunc, SavedRegs, RS);

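      // Copy each set bit (a physical register number) out of the BitVector
      // into the cached set-vector.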
      for (int Reg = SavedRegs.find_first(); Reg != -1;
           Reg = SavedRegs.find_next(Reg))
        CurrentCSRs.insert((unsigned)Reg);
    }
    return CurrentCSRs;
  }

  /// Update the Save and Restore points such that \p MBB is in
  /// the region that is dominated by Save and post-dominated by Restore
  /// and Save and Restore still match the safe point definition.
  /// Such a point may not exist and Save and/or Restore may be null after
  /// this call.
  void updateSaveRestorePoints(MachineBasicBlock &MBB, RegScavenger *RS);

  // Try to find a safe point based on dominance and block frequency without
  // any change in IR.
  bool performShrinkWrapping(
      const ReversePostOrderTraversal<MachineBasicBlock *> &RPOT,
      RegScavenger *RS);

  /// This function tries to split the restore point if doing so can shrink the
  /// save point further. \return True if the restore point is split.
  bool postShrinkWrapping(bool HasCandidate, MachineFunction &MF,
                          RegScavenger *RS);

  /// This function analyzes if the restore point can be split to create a new
  /// restore point. This function collects
  /// 1. Any preds of the current restore that are reachable by callee save/FI
  /// blocks - indicated by DirtyPreds
  /// 2. Any preds of the current restore that are not DirtyPreds - indicated
  /// by CleanPreds
  /// Both sets should be non-empty for considering a restore point split.
  bool checkIfRestoreSplittable(
      const MachineBasicBlock *CurRestore,
      const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
      SmallVectorImpl<MachineBasicBlock *> &DirtyPreds,
      SmallVectorImpl<MachineBasicBlock *> &CleanPreds,
      const TargetInstrInfo *TII, RegScavenger *RS);

  /// Initialize the pass for \p MF.
  void init(MachineFunction &MF) {
    RCI.runOnMachineFunction(MF);
    MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
    MPDT = &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
    Save = nullptr;
    Restore = nullptr;
    MBFI = &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI();
    MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
    ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
    EntryFreq = MBFI->getEntryFreq();
    const TargetSubtargetInfo &Subtarget = MF.getSubtarget();
    const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
    FrameSetupOpcode = TII.getCallFrameSetupOpcode();
    FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
    SP = Subtarget.getTargetLowering()->getStackPointerRegisterToSaveRestore();
    Entry = &MF.front();
    CurrentCSRs.clear();
    MachineFunc = &MF;

    ++NumFunc;
  }

  /// Check whether or not Save and Restore points are still interesting for
  /// shrink-wrapping.
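  /// A Save equal to the entry block would leave the prologue exactly where
  /// it already is, hence the `Save != Entry` check.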
  bool ArePointsInteresting() const { return Save != Entry && Save && Restore; }

  /// Check if shrink wrapping is enabled for this target and function.
  static bool isShrinkWrapEnabled(const MachineFunction &MF);

public:
  static char ID;

  ShrinkWrap() : MachineFunctionPass(ID) {
    initializeShrinkWrapPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<MachineBlockFrequencyInfoWrapperPass>();
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
    AU.addRequired<MachineLoopInfoWrapperPass>();
    AU.addRequired<MachineOptimizationRemarkEmitterPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
      MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return "Shrink Wrapping analysis"; }

  /// Perform the shrink-wrapping analysis and update
  /// the MachineFrameInfo attached to \p MF with the results.
  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end anonymous namespace

char ShrinkWrap::ID = 0;

char &llvm::ShrinkWrapID = ShrinkWrap::ID;

INITIALIZE_PASS_BEGIN(ShrinkWrap, DEBUG_TYPE, "Shrink Wrap Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(ShrinkWrap, DEBUG_TYPE, "Shrink Wrap Pass", false, false)

bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS,
                                 bool StackAddressUsed) const {
  /// Check if \p Op is known to access an address not on the function's stack.
  /// At the moment, accesses where the underlying object is a global, function
  /// argument, or jump table are considered non-stack accesses. Note that the
  /// caller's stack may get accessed when passing an argument via the stack,
  /// but not the stack of the current function.
  ///
  auto IsKnownNonStackPtr = [](MachineMemOperand *Op) {
    if (Op->getValue()) {
      const Value *UO = getUnderlyingObject(Op->getValue());
      if (!UO)
        return false;
      if (auto *Arg = dyn_cast<Argument>(UO))
        return !Arg->hasPassPointeeByValueCopyAttr();
      return isa<GlobalValue>(UO);
    }
    if (const PseudoSourceValue *PSV = Op->getPseudoValue())
      return PSV->isJumpTable();
    return false;
  };
  // Load/store operations may access the stack indirectly when we previously
  // computed an address to a stack location.
  if (StackAddressUsed && MI.mayLoadOrStore() &&
      (MI.isCall() || MI.hasUnmodeledSideEffects() || MI.memoperands_empty() ||
       !all_of(MI.memoperands(), IsKnownNonStackPtr)))
    return true;

  if (MI.getOpcode() == FrameSetupOpcode ||
      MI.getOpcode() == FrameDestroyOpcode) {
    LLVM_DEBUG(dbgs() << "Frame instruction: " << MI << '\n');
    return true;
  }
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  for (const MachineOperand &MO : MI.operands()) {
    bool UseOrDefCSR = false;
    if (MO.isReg()) {
      // Ignore instructions like DBG_VALUE which don't read/def the register.
      if (!MO.isDef() && !MO.readsReg())
        continue;
      Register PhysReg = MO.getReg();
      if (!PhysReg)
        continue;
      assert(PhysReg.isPhysical() && "Unallocated register?!");
      // The stack pointer is not normally described as a callee-saved register
      // in calling convention definitions, so we need to watch for it
      // separately. An SP mentioned by a call instruction we can ignore,
      // though, as it's harmless and we do not want to effectively disable tail
      // calls by forcing the restore point to post-dominate them.
      // PPC's LR is also not normally described as a callee-saved register in
      // calling convention definitions, so we need to watch for it, too. An LR
      // mentioned implicitly by a return (or "branch to link register")
      // instruction we can ignore, otherwise we may pessimize shrink-wrapping.
      UseOrDefCSR =
          (!MI.isCall() && PhysReg == SP) ||
          RCI.getLastCalleeSavedAlias(PhysReg) ||
          (!MI.isReturn() && TRI->isNonallocatableRegisterCalleeSave(PhysReg));
    } else if (MO.isRegMask()) {
      // Check if this regmask clobbers any of the CSRs.
      for (unsigned Reg : getCurrentCSRs(RS)) {
        if (MO.clobbersPhysReg(Reg)) {
          UseOrDefCSR = true;
          break;
        }
      }
    }
    // Skip FrameIndex operands in DBG_VALUE instructions.
    if (UseOrDefCSR || (MO.isFI() && !MI.isDebugValue())) {
      LLVM_DEBUG(dbgs() << "Use or define CSR(" << UseOrDefCSR << ") or FI("
                        << MO.isFI() << "): " << MI << '\n');
      return true;
    }
  }
  return false;
}

/// Helper function to find the immediate (post) dominator.
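/// Used below both on predecessors (with MDT) to hoist Save and on successors
/// (with MPDT) to sink Restore; with \p Strict = false, \p Block itself may be
/// returned when it already (post-)dominates every block in \p BBs.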
template <typename ListOfBBs, typename DominanceAnalysis>
static MachineBasicBlock *FindIDom(MachineBasicBlock &Block, ListOfBBs BBs,
                                   DominanceAnalysis &Dom, bool Strict = true) {
  MachineBasicBlock *IDom = &Block;
  for (MachineBasicBlock *BB : BBs) {
    IDom = Dom.findNearestCommonDominator(IDom, BB);
    if (!IDom)
      break;
  }
  if (Strict && IDom == &Block)
    return nullptr;
  return IDom;
}

static bool isAnalyzableBB(const TargetInstrInfo &TII,
                           MachineBasicBlock &Entry) {
  // Check if the block is analyzable.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  return !TII.analyzeBranch(Entry, TBB, FBB, Cond);
}

/// Determines if any predecessor of MBB is on the path from a block that has a
/// use or def of CSRs/FI to MBB.
/// ReachableByDirty: All blocks reachable from a block that has a use or def
/// of CSR/FI.
static bool
hasDirtyPred(const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
             const MachineBasicBlock &MBB) {
  for (const MachineBasicBlock *PredBB : MBB.predecessors())
    if (ReachableByDirty.count(PredBB))
      return true;
  return false;
}

/// Derives the list of all the basic blocks reachable from MBB.
static void markAllReachable(DenseSet<const MachineBasicBlock *> &Visited,
                             const MachineBasicBlock &MBB) {
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB.succ_begin(),
                                               MBB.succ_end());
  Visited.insert(&MBB);
  while (!Worklist.empty()) {
    MachineBasicBlock *SuccMBB = Worklist.pop_back_val();
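    // DenseSet::insert reports whether insertion took place; a false second
    // member means the block was already visited, so its successors are done.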
    if (!Visited.insert(SuccMBB).second)
      continue;
    Worklist.append(SuccMBB->succ_begin(), SuccMBB->succ_end());
  }
}

/// Collect blocks reachable by use or def of CSRs/FI.
static void collectBlocksReachableByDirty(
    const DenseSet<const MachineBasicBlock *> &DirtyBBs,
    DenseSet<const MachineBasicBlock *> &ReachableByDirty) {
  for (const MachineBasicBlock *MBB : DirtyBBs) {
    if (ReachableByDirty.count(MBB))
      continue;
    // Mark all descendants as reachable.
    markAllReachable(ReachableByDirty, *MBB);
  }
}

/// \return true if there is a clean path from SavePoint to the original
/// Restore.
static bool
isSaveReachableThroughClean(const MachineBasicBlock *SavePoint,
                            ArrayRef<MachineBasicBlock *> CleanPreds) {
  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(CleanPreds.begin(),
                                               CleanPreds.end());
  while (!Worklist.empty()) {
    MachineBasicBlock *CleanBB = Worklist.pop_back_val();
    if (CleanBB == SavePoint)
      return true;
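    // Stop walking at blocks we have already seen and at blocks with no
    // predecessors (e.g., the entry block).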
    if (!Visited.insert(CleanBB).second || !CleanBB->pred_size())
      continue;
    Worklist.append(CleanBB->pred_begin(), CleanBB->pred_end());
  }
  return false;
}

/// This function updates the branches post restore point split.
///
/// The restore point has been split.
/// Old restore point: MBB
/// New restore point: NMBB
/// Any basic block (say BBToUpdate) which had a fallthrough to MBB
/// previously should
/// 1. Fallthrough to NMBB iff NMBB is inserted immediately above MBB in the
/// block layout OR
/// 2. Branch unconditionally to NMBB iff NMBB is inserted at any other place.
static void updateTerminator(MachineBasicBlock *BBToUpdate,
                             MachineBasicBlock *NMBB,
                             const TargetInstrInfo *TII) {
  DebugLoc DL = BBToUpdate->findBranchDebugLoc();
  // If NMBB isn't the new layout successor for BBToUpdate, insert an
  // unconditional branch to it.
  if (!BBToUpdate->isLayoutSuccessor(NMBB))
    TII->insertUnconditionalBranch(*BBToUpdate, NMBB, DL);
}

/// This function splits the restore point and returns the new restore
/// point/BB.
///
/// DirtyPreds: Predecessors of \p MBB that are ReachableByDirty
///
/// The decision has been made to split the restore point.
/// old restore point: \p MBB
/// new restore point: \p NMBB
/// This function makes the necessary block layout changes so that
/// 1. \p NMBB points to \p MBB unconditionally
/// 2. All DirtyPreds that previously pointed to \p MBB point to \p NMBB
static MachineBasicBlock *
tryToSplitRestore(MachineBasicBlock *MBB,
                  ArrayRef<MachineBasicBlock *> DirtyPreds,
                  const TargetInstrInfo *TII) {
  MachineFunction *MF = MBB->getParent();

  // Get the list of DirtyPreds that have a fallthrough to MBB
  // before the block layout change. This is just to ensure that if NMBB is
  // inserted after MBB, then we create an unconditional branch from
  // DirtyPred/CleanPred to NMBB.
  SmallPtrSet<MachineBasicBlock *, 8> MBBFallthrough;
  for (MachineBasicBlock *BB : DirtyPreds)
    if (BB->getFallThrough(false) == MBB)
      MBBFallthrough.insert(BB);

  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
  // Insert this block at the end of the function. Inserting in between may
  // interfere with control flow optimizer decisions.
  MF->insert(MF->end(), NMBB);

  for (const MachineBasicBlock::RegisterMaskPair &LI : MBB->liveins())
    NMBB->addLiveIn(LI.PhysReg);

  TII->insertUnconditionalBranch(*NMBB, MBB, DebugLoc());

  // After splitting, all predecessors of the restore point should be dirty
  // blocks.
  for (MachineBasicBlock *SuccBB : DirtyPreds)
    SuccBB->ReplaceUsesOfBlockWith(MBB, NMBB);

  NMBB->addSuccessor(MBB);

  for (MachineBasicBlock *BBToUpdate : MBBFallthrough)
    updateTerminator(BBToUpdate, NMBB, TII);

  return NMBB;
}

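// An illustrative sketch of the CFG rewrite performed by tryToSplitRestore
// (DP = dirty pred, CP = clean pred):
//
//   Before:  DP --> MBB <-- CP        After:  DP --> NMBB --> MBB <-- CP
//
// NMBB becomes the new restore point, so paths coming only from clean
// predecessors reach MBB without executing the epilogue code, matching a
// save point shrunk to dominate only the dirty region.
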
/// This function undoes the restore point split done earlier.
///
/// DirtyPreds: All predecessors of \p NMBB that are ReachableByDirty.
///
/// The restore point was split and the change needs to be unrolled. Make the
/// necessary changes to reset the restore point from \p NMBB to \p MBB.
static void rollbackRestoreSplit(MachineFunction &MF, MachineBasicBlock *NMBB,
                                 MachineBasicBlock *MBB,
                                 ArrayRef<MachineBasicBlock *> DirtyPreds,
                                 const TargetInstrInfo *TII) {
  // For a BB, if NMBB is a fallthrough in the current layout, then in the new
  // layout a. BB should fallthrough to MBB OR b. BB should unconditionally
  // branch to MBB.
  SmallPtrSet<MachineBasicBlock *, 8> NMBBFallthrough;
  for (MachineBasicBlock *BB : DirtyPreds)
    if (BB->getFallThrough(false) == NMBB)
      NMBBFallthrough.insert(BB);

  NMBB->removeSuccessor(MBB);
  for (MachineBasicBlock *SuccBB : DirtyPreds)
    SuccBB->ReplaceUsesOfBlockWith(NMBB, MBB);

  NMBB->erase(NMBB->begin(), NMBB->end());
  NMBB->eraseFromParent();

  for (MachineBasicBlock *BBToUpdate : NMBBFallthrough)
    updateTerminator(BBToUpdate, MBB, TII);
}

// A block is deemed fit for restore point split iff there exist
// 1. DirtyPreds - preds of CurRestore reachable from use or def of CSR/FI
// 2. CleanPreds - preds of CurRestore that aren't DirtyPreds
bool ShrinkWrap::checkIfRestoreSplittable(
    const MachineBasicBlock *CurRestore,
    const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
    SmallVectorImpl<MachineBasicBlock *> &DirtyPreds,
    SmallVectorImpl<MachineBasicBlock *> &CleanPreds,
    const TargetInstrInfo *TII, RegScavenger *RS) {
  for (const MachineInstr &MI : *CurRestore)
    if (useOrDefCSROrFI(MI, RS, /*StackAddressUsed=*/true))
      return false;

  for (MachineBasicBlock *PredBB : CurRestore->predecessors()) {
    if (!isAnalyzableBB(*TII, *PredBB))
      return false;

    if (ReachableByDirty.count(PredBB))
      DirtyPreds.push_back(PredBB);
    else
      CleanPreds.push_back(PredBB);
  }

  return !(CleanPreds.empty() || DirtyPreds.empty());
}

bool ShrinkWrap::postShrinkWrapping(bool HasCandidate, MachineFunction &MF,
                                    RegScavenger *RS) {
  if (!EnablePostShrinkWrapOpt)
    return false;

  MachineBasicBlock *InitSave = nullptr;
  MachineBasicBlock *InitRestore = nullptr;

  if (HasCandidate) {
    InitSave = Save;
    InitRestore = Restore;
  } else {
    InitRestore = nullptr;
    InitSave = &MF.front();
    for (MachineBasicBlock &MBB : MF) {
      if (MBB.isEHFuncletEntry())
        return false;
      if (MBB.isReturnBlock()) {
        // Do not support multiple restore points.
        if (InitRestore)
          return false;
        InitRestore = &MBB;
      }
    }
  }

  if (!InitSave || !InitRestore || InitRestore == InitSave ||
      !MDT->dominates(InitSave, InitRestore) ||
      !MPDT->dominates(InitRestore, InitSave))
    return false;

  // Bail out of the optimization if any of the basic blocks is the target of
  // an INLINEASM_BR instruction.
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isInlineAsmBrIndirectTarget())
      return false;

  DenseSet<const MachineBasicBlock *> DirtyBBs;
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHPad()) {
      DirtyBBs.insert(&MBB);
      continue;
    }
    for (const MachineInstr &MI : MBB)
      if (useOrDefCSROrFI(MI, RS, /*StackAddressUsed=*/true)) {
        DirtyBBs.insert(&MBB);
        break;
      }
  }

  // Find blocks reachable from the use or def of CSRs/FI.
  DenseSet<const MachineBasicBlock *> ReachableByDirty;
  collectBlocksReachableByDirty(DirtyBBs, ReachableByDirty);

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<MachineBasicBlock *, 2> DirtyPreds;
  SmallVector<MachineBasicBlock *, 2> CleanPreds;
  if (!checkIfRestoreSplittable(InitRestore, ReachableByDirty, DirtyPreds,
                                CleanPreds, TII, RS))
    return false;

  // Try to find a new save point that dominates all dirty blocks.
  MachineBasicBlock *NewSave =
      FindIDom<>(**DirtyPreds.begin(), DirtyPreds, *MDT, false);

  while (NewSave && (hasDirtyPred(ReachableByDirty, *NewSave) ||
                     EntryFreq < MBFI->getBlockFreq(NewSave) ||
                     /* The entry block's frequency has been observed to be
                        higher than a loop block's in some cases. */
                     MLI->getLoopFor(NewSave)))
    NewSave = FindIDom<>(**NewSave->pred_begin(), NewSave->predecessors(), *MDT,
                         false);

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!NewSave || NewSave == InitSave ||
      isSaveReachableThroughClean(NewSave, CleanPreds) ||
      !TFI->canUseAsPrologue(*NewSave))
    return false;

  // Now we know that splitting a restore point can isolate the restore point
  // from clean blocks and doing so can shrink the save point.
  MachineBasicBlock *NewRestore =
      tryToSplitRestore(InitRestore, DirtyPreds, TII);

  // Make sure the new restore point is valid as an epilogue, depending on the
  // target.
  if (!TFI->canUseAsEpilogue(*NewRestore)) {
    rollbackRestoreSplit(MF, NewRestore, InitRestore, DirtyPreds, TII);
    return false;
  }

  Save = NewSave;
  Restore = NewRestore;

  MDT->recalculate(MF);
  MPDT->recalculate(MF);

  assert((MDT->dominates(Save, Restore) && MPDT->dominates(Restore, Save)) &&
         "Incorrect save or restore point due to dominance relations");
  assert((!MLI->getLoopFor(Save) && !MLI->getLoopFor(Restore)) &&
         "Unexpected save or restore point in a loop");
  assert((EntryFreq >= MBFI->getBlockFreq(Save) &&
          EntryFreq >= MBFI->getBlockFreq(Restore)) &&
         "Incorrect save or restore point based on block frequency");
  return true;
}

void ShrinkWrap::updateSaveRestorePoints(MachineBasicBlock &MBB,
                                         RegScavenger *RS) {
  // Get rid of the easy cases first.
  if (!Save)
    Save = &MBB;
  else
    Save = MDT->findNearestCommonDominator(Save, &MBB);
  assert(Save);

  if (!Restore)
    Restore = &MBB;
  else if (MPDT->getNode(&MBB)) // If the block is not in the post dom tree, it
                                // means the block never returns. If that's the
                                // case, we don't want to call
                                // `findNearestCommonDominator`, which will
                                // return `Restore`.
    Restore = MPDT->findNearestCommonDominator(Restore, &MBB);
  else
    Restore = nullptr; // Abort, we can't find a restore point in this case.

  // Make sure we would be able to insert the restore code before the
  // terminator.
  if (Restore == &MBB) {
    for (const MachineInstr &Terminator : MBB.terminators()) {
      if (!useOrDefCSROrFI(Terminator, RS, /*StackAddressUsed=*/true))
        continue;
      // One of the terminators needs to happen before the restore point.
      if (MBB.succ_empty()) {
        Restore = nullptr; // Abort, we can't find a restore point in this case.
        break;
      }
      // Look for a restore point that post-dominates all the successors.
      // The immediate post-dominator is what we are looking for.
      Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
      break;
    }
  }

  if (!Restore) {
    LLVM_DEBUG(
        dbgs() << "Restore point would need to span several blocks\n");
    return;
  }

  // Make sure Save and Restore are suitable for shrink-wrapping:
  // 1. all paths from Save need to lead to Restore before exiting.
  // 2. all paths to Restore need to go through Save from Entry.
  // We achieve that by making sure that:
  // A. Save dominates Restore.
  // B. Restore post-dominates Save.
  // C. Save and Restore are in the same loop.
  bool SaveDominatesRestore = false;
  bool RestorePostDominatesSave = false;
  while (Restore &&
         (!(SaveDominatesRestore = MDT->dominates(Save, Restore)) ||
          !(RestorePostDominatesSave = MPDT->dominates(Restore, Save)) ||
          // Post-dominance is not enough in loops to ensure that all uses/defs
          // are after the prologue and before the epilogue at runtime.
          // E.g.,
          // while(1) {
          //  Save
          //  Restore
          //   if (...)
          //     break;
          //  use/def CSRs
          // }
          // All the uses/defs of CSRs are dominated by Save and post-dominated
          // by Restore. However, the CSR uses are still reachable after
          // Restore and before Save are executed.
          //
          // For now, just push the restore/save points outside of loops.
          // FIXME: Refine the criteria to still find interesting cases
          // for loops.
          MLI->getLoopFor(Save) || MLI->getLoopFor(Restore))) {
    // Fix (A).
    if (!SaveDominatesRestore) {
      Save = MDT->findNearestCommonDominator(Save, Restore);
      continue;
    }
    // Fix (B).
    if (!RestorePostDominatesSave)
      Restore = MPDT->findNearestCommonDominator(Restore, Save);

    // Fix (C).
    if (Restore && (MLI->getLoopFor(Save) || MLI->getLoopFor(Restore))) {
      if (MLI->getLoopDepth(Save) > MLI->getLoopDepth(Restore)) {
        // Push Save outside of this loop if the immediate dominator is
        // different from the save block. If the immediate dominator is not
        // different, bail out.
        Save = FindIDom<>(*Save, Save->predecessors(), *MDT);
        if (!Save)
          break;
      } else {
        // If the loop does not exit, there is no point in looking
        // for a post-dominator outside the loop.
        SmallVector<MachineBasicBlock*, 4> ExitBlocks;
        MLI->getLoopFor(Restore)->getExitingBlocks(ExitBlocks);
        // Push Restore outside of this loop.
        // Look for the immediate post-dominator of the loop exits.
        MachineBasicBlock *IPdom = Restore;
        for (MachineBasicBlock *LoopExitBB: ExitBlocks) {
          IPdom = FindIDom<>(*IPdom, LoopExitBB->successors(), *MPDT);
          if (!IPdom)
            break;
        }
        // If the immediate post-dominator is not in a less nested loop,
        // then we are stuck in a program with an infinite loop.
        // In that case, we will not find a safe point, hence, bail out.
        if (IPdom && MLI->getLoopDepth(IPdom) < MLI->getLoopDepth(Restore))
          Restore = IPdom;
        else {
          Restore = nullptr;
          break;
        }
      }
    }
  }
}

static bool giveUpWithRemarks(MachineOptimizationRemarkEmitter *ORE,
                              StringRef RemarkName, StringRef RemarkMessage,
                              const DiagnosticLocation &Loc,
                              const MachineBasicBlock *MBB) {
  ORE->emit([&]() {
    return MachineOptimizationRemarkMissed(DEBUG_TYPE, RemarkName, Loc, MBB)
           << RemarkMessage;
  });

  LLVM_DEBUG(dbgs() << RemarkMessage << '\n');
  return false;
}

bool ShrinkWrap::performShrinkWrapping(
    const ReversePostOrderTraversal<MachineBasicBlock *> &RPOT,
    RegScavenger *RS) {
  for (MachineBasicBlock *MBB : RPOT) {
    LLVM_DEBUG(dbgs() << "Look into: " << printMBBReference(*MBB) << '\n');

    if (MBB->isEHFuncletEntry())
      return giveUpWithRemarks(ORE, "UnsupportedEHFunclets",
                               "EH Funclets are not supported yet.",
                               MBB->front().getDebugLoc(), MBB);

    if (MBB->isEHPad() || MBB->isInlineAsmBrIndirectTarget()) {
      // Push the prologue and epilogue outside of the region that may throw (or
      // jump out via inlineasm_br), by making sure that all the landing pads
      // are at least at the boundary of the save and restore points.  The
      // problem is that a basic block can jump out from the middle in these
      // cases, which we do not handle.
      updateSaveRestorePoints(*MBB, RS);
      if (!ArePointsInteresting()) {
        LLVM_DEBUG(dbgs() << "EHPad/inlineasm_br prevents shrink-wrapping\n");
        return false;
      }
      continue;
    }

    bool StackAddressUsed = false;
    // Check if we found any stack accesses in the predecessors. We are not
    // doing a full dataflow analysis here to keep things simple but just
    // rely on a reverse post-order traversal (RPOT) to guarantee predecessors
    // are already processed except for loops (and accept the conservative
    // result for loops).
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (StackAddressUsedBlockInfo.test(Pred->getNumber())) {
        StackAddressUsed = true;
        break;
      }
    }

    for (const MachineInstr &MI : *MBB) {
      if (useOrDefCSROrFI(MI, RS, StackAddressUsed)) {
        // Save (resp. restore) point must dominate (resp. post-dominate)
        // MI. Look for the proper basic block for those.
        updateSaveRestorePoints(*MBB, RS);
        // If we are at a point where we cannot improve the placement of
        // save/restore instructions, just give up.
        if (!ArePointsInteresting()) {
          LLVM_DEBUG(dbgs() << "No Shrink wrap candidate found\n");
          return false;
        }
        // No need to look for other instructions, this basic block
        // will already be part of the handled region.
        StackAddressUsed = true;
        break;
      }
    }
    StackAddressUsedBlockInfo[MBB->getNumber()] = StackAddressUsed;
  }
  if (!ArePointsInteresting()) {
    // If the points are not interesting at this point, then they must be null
    // because it means we did not encounter any frame/CSR related code.
    // Otherwise, we would have returned from the previous loop.
    assert(!Save && !Restore && "We miss a shrink-wrap opportunity?!");
    LLVM_DEBUG(dbgs() << "Nothing to shrink-wrap\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "\n ** Results **\nFrequency of the Entry: "
                    << EntryFreq.getFrequency() << '\n');

  const TargetFrameLowering *TFI =
      MachineFunc->getSubtarget().getFrameLowering();
  do {
    LLVM_DEBUG(dbgs() << "Shrink wrap candidates (#, Name, Freq):\nSave: "
                      << printMBBReference(*Save) << ' '
                      << printBlockFreq(*MBFI, *Save)
                      << "\nRestore: " << printMBBReference(*Restore) << ' '
                      << printBlockFreq(*MBFI, *Restore) << '\n');

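    // Note that the assignments inside the following condition record which
    // check failed, so that the right point (Save or Restore) is hoisted or
    // sunk below.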
    bool IsSaveCheap, TargetCanUseSaveAsPrologue = false;
    if (((IsSaveCheap = EntryFreq >= MBFI->getBlockFreq(Save)) &&
         EntryFreq >= MBFI->getBlockFreq(Restore)) &&
        ((TargetCanUseSaveAsPrologue = TFI->canUseAsPrologue(*Save)) &&
         TFI->canUseAsEpilogue(*Restore)))
      break;
    LLVM_DEBUG(
        dbgs() << "New points are too expensive or invalid for the target\n");
    MachineBasicBlock *NewBB;
    if (!IsSaveCheap || !TargetCanUseSaveAsPrologue) {
      Save = FindIDom<>(*Save, Save->predecessors(), *MDT);
      if (!Save)
        break;
      NewBB = Save;
    } else {
      // Restore is expensive.
      Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
      if (!Restore)
        break;
      NewBB = Restore;
    }
    updateSaveRestorePoints(*NewBB, RS);
  } while (Save && Restore);

  if (!ArePointsInteresting()) {
    ++NumCandidatesDropped;
    return false;
  }
  return true;
}

bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF))
    return false;

  LLVM_DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');

  init(MF);

  ReversePostOrderTraversal<MachineBasicBlock *> RPOT(&*MF.begin());
  if (containsIrreducibleCFG<MachineBasicBlock *>(RPOT, *MLI)) {
    // If MF is irreducible, a block may be in a loop without
    // MachineLoopInfo reporting it. I.e., we may use the
    // post-dominance property in loops, which leads to incorrect
    // results. Moreover, we may miss that the prologue and
    // epilogue are not in the same loop, leading to unbalanced
    // construction/deconstruction of the stack frame.
    return giveUpWithRemarks(ORE, "UnsupportedIrreducibleCFG",
                             "Irreducible CFGs are not supported yet.",
                             MF.getFunction().getSubprogram(), &MF.front());
  }

  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  std::unique_ptr<RegScavenger> RS(
      TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr);

  bool Changed = false;

  // Initially, conservatively assume that stack addresses can be used in each
  // basic block and change the state only for those basic blocks for which we
  // were able to prove the opposite.
  StackAddressUsedBlockInfo.resize(MF.getNumBlockIDs(), true);
  bool HasCandidate = performShrinkWrapping(RPOT, RS.get());
  StackAddressUsedBlockInfo.clear();
  Changed = postShrinkWrapping(HasCandidate, MF, RS.get());
  if (!HasCandidate && !Changed)
    return false;
  if (!ArePointsInteresting())
    return Changed;

  LLVM_DEBUG(dbgs() << "Final shrink wrap candidates:\nSave: "
                    << printMBBReference(*Save) << ' '
                    << "\nRestore: " << printMBBReference(*Restore) << '\n');

  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setSavePoint(Save);
  MFI.setRestorePoint(Restore);
  ++NumCandidates;
  return Changed;
}

bool ShrinkWrap::isShrinkWrapEnabled(const MachineFunction &MF) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  switch (EnableShrinkWrapOpt) {
  case cl::BOU_UNSET:
    return TFI->enableShrinkWrapping(MF) &&
           // Windows with CFI has some limitations that make it impossible
           // to use shrink-wrapping.
           !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
           // Sanitizers look at the value of the stack at the location
           // of the crash. Since a crash can happen anywhere, the
           // frame must be lowered before anything else happens for the
           // sanitizers to be able to get a correct stack frame.
           !(MF.getFunction().hasFnAttribute(Attribute::SanitizeAddress) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeThread) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeMemory) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress));
  // If EnableShrinkWrap is set, it takes precedence over whatever the
  // target sets. The rationale is that we assume we want to test
  // something related to shrink-wrapping.
  case cl::BOU_TRUE:
    return true;
  case cl::BOU_FALSE:
    return false;
  }
  llvm_unreachable("Invalid shrink-wrapping state");
}
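
// For reference, the pass can be forced on or off from the command line via
// the boolOrDefault option defined above, e.g. with llc:
//   llc -enable-shrink-wrap=true  foo.ll  # force-enable, overriding the target
//   llc -enable-shrink-wrap=false foo.ll  # force-disable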
1003