//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a pass that optimizes call sequences on x86.
// Currently, it converts movs of function parameters onto the stack into
// pushes. This is beneficial for two main reasons:
// 1) The push instruction encoding is much smaller than a stack-ptr-based mov.
// 2) It is possible to push memory arguments directly. So, if the
//    transformation is performed pre-reg-alloc, it can help relieve
//    register pressure.
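//
// For example (illustrative, 32-bit), a two-argument call sequence such as:
//   movl    %eax, 4(%esp)
//   movl    $42, (%esp)
//   calll   foo
// becomes:
//   pushl   %eax
//   pushl   $42
//   calll   foo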
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86FrameLowering.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "x86-cf-opt"

static cl::opt<bool>
    NoX86CFOpt("no-x86-call-frame-opt",
               cl::desc("Avoid optimizing x86 call frames for size"),
               cl::init(false), cl::Hidden);

namespace {

class X86CallFrameOptimization : public MachineFunctionPass {
public:
  X86CallFrameOptimization() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;

private:
  // Information we know about a particular call site
  struct CallContext {
    CallContext() : FrameSetup(nullptr), ArgStoreVector(4, nullptr) {}

    // Iterator referring to the frame setup instruction
    MachineBasicBlock::iterator FrameSetup;

    // Actual call instruction
    MachineInstr *Call = nullptr;

    // A copy of the stack pointer
    MachineInstr *SPCopy = nullptr;

    // The total displacement of all passed parameters
    int64_t ExpectedDist = 0;

    // The sequence of storing instructions used to pass the parameters
    SmallVector<MachineInstr *, 4> ArgStoreVector;

    // True if this call site has no stack parameters
    bool NoStackParams = false;

    // True if this call site can use push instructions
    bool UsePush = false;
  };

  typedef SmallVector<CallContext, 8> ContextVector;

  bool isLegal(MachineFunction &MF);

  bool isProfitable(MachineFunction &MF, ContextVector &CallSeqMap);

  void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I, CallContext &Context);

  void adjustCallSequence(MachineFunction &MF, const CallContext &Context);

  MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
                                   Register Reg);

  enum InstClassification { Convert, Skip, Exit };

  InstClassification classifyInstruction(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         const X86RegisterInfo &RegInfo,
                                         const DenseSet<MCRegister> &UsedRegs);

  StringRef getPassName() const override { return "X86 Optimize Call Frame"; }

  const X86InstrInfo *TII = nullptr;
  const X86FrameLowering *TFL = nullptr;
  const X86Subtarget *STI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  unsigned SlotSize = 0;
  unsigned Log2SlotSize = 0;
};

} // end anonymous namespace
char X86CallFrameOptimization::ID = 0;
INITIALIZE_PASS(X86CallFrameOptimization, DEBUG_TYPE,
                "X86 Call Frame Optimization", false, false)

// This checks whether the transformation is legal.
// Also returns false in cases where it's potentially legal, but
// we don't even want to try.
bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
  if (NoX86CFOpt.getValue())
    return false;

  // We can't encode multiple DW_CFA_GNU_args_size or DW_CFA_def_cfa_offset
  // in the compact unwind encoding that Darwin uses. So, bail if there
  // is a danger of that being generated.
  if (STI->isTargetDarwin() &&
      (!MF.getLandingPads().empty() ||
       (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF))))
    return false;

  // It is not valid to change the stack pointer outside the prolog/epilog
  // on 64-bit Windows.
  if (STI->isTargetWin64())
    return false;

  // You would expect straight-line code between call-frame setup and
  // call-frame destroy. You would be wrong. There are circumstances (e.g.
  // CMOV_GR8 expansion of a select that feeds a function call!) where we can
  // end up with the setup and the destroy in different basic blocks.
  // This is bad, and breaks SP adjustment.
  // So, check that all of the frames in the function are closed inside
  // the same block, and, for good measure, that there are no nested frames.
  //
  // If any call allocates more argument stack memory than the stack
  // probe size, don't do this optimization. Otherwise, this pass
  // would need to synthesize additional stack probe calls to allocate
  // memory for arguments.
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  bool EmitStackProbeCall = STI->getTargetLowering()->hasStackProbeSymbol(MF);
  unsigned StackProbeSize = STI->getTargetLowering()->getStackProbeSize(MF);
  for (MachineBasicBlock &BB : MF) {
    bool InsideFrameSequence = false;
    for (MachineInstr &MI : BB) {
      if (MI.getOpcode() == FrameSetupOpcode) {
        if (TII->getFrameSize(MI) >= StackProbeSize && EmitStackProbeCall)
          return false;
        if (InsideFrameSequence)
          return false;
        InsideFrameSequence = true;
      } else if (MI.getOpcode() == FrameDestroyOpcode) {
        if (!InsideFrameSequence)
          return false;
        InsideFrameSequence = false;
      }
    }

    if (InsideFrameSequence)
      return false;
  }

  return true;
}

// Check whether this transformation is profitable for a particular
// function - in terms of code size.
bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
                                            ContextVector &CallSeqVector) {
  // This transformation is always a win when we do not expect to have
  // a reserved call frame. Under other circumstances, it may be either
  // a win or a loss, and requires a heuristic.
  bool CannotReserveFrame = MF.getFrameInfo().hasVarSizedObjects();
  if (CannotReserveFrame)
    return true;

  Align StackAlign = TFL->getStackAlign();

  int64_t Advantage = 0;
  for (const auto &CC : CallSeqVector) {
    // Call sites where no parameters are passed on the stack
    // do not affect the cost, since no stack adjustment is needed.
    if (CC.NoStackParams)
      continue;

    if (!CC.UsePush) {
      // If we don't use pushes for a particular call site,
      // we pay for not having a reserved call frame with an
      // additional sub/add esp pair. The cost is ~3 bytes per instruction,
      // depending on the size of the constant.
      // TODO: Callee-pop functions should have a smaller penalty, because
      // an add is needed even with a reserved call frame.
      Advantage -= 6;
    } else {
      // We can use pushes. First, account for the fixed costs.
      // We'll need an add after the call.
      Advantage -= 3;
      // If we have to realign the stack, we'll also need a sub before the
      // pushes.
      if (!isAligned(StackAlign, CC.ExpectedDist))
        Advantage -= 3;
      // Now, for each push, we save ~3 bytes. For small constants, we actually
      // save more (up to 5 bytes), but 3 should be a good approximation.
      Advantage += (CC.ExpectedDist >> Log2SlotSize) * 3;
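      // Worked example (illustrative): on 32-bit x86 with SlotSize = 4 and a
      // 16-byte stack alignment, a call passing three stack arguments has
      // ExpectedDist = 12, which is not stack-aligned, so the estimate is
      // -3 (add) - 3 (realigning sub) + 3 * 3 (three pushes) = +3 in our
      // favor.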
    }
  }

  return Advantage >= 0;
}

bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget<X86Subtarget>();
  TII = STI->getInstrInfo();
  TFL = STI->getFrameLowering();
  MRI = &MF.getRegInfo();

  const X86RegisterInfo &RegInfo = *STI->getRegisterInfo();
  SlotSize = RegInfo.getSlotSize();
  assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
  Log2SlotSize = Log2_32(SlotSize);

  if (skipFunction(MF.getFunction()) || !isLegal(MF))
    return false;

  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();

  bool Changed = false;

  ContextVector CallSeqVector;

  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.getOpcode() == FrameSetupOpcode) {
        CallContext Context;
        collectCallInfo(MF, MBB, MI, Context);
        CallSeqVector.push_back(Context);
      }

  if (!isProfitable(MF, CallSeqVector))
    return false;

  for (const auto &CC : CallSeqVector) {
    if (CC.UsePush) {
      adjustCallSequence(MF, CC);
      Changed = true;
    }
  }

  return Changed;
}

X86CallFrameOptimization::InstClassification
X86CallFrameOptimization::classifyInstruction(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const X86RegisterInfo &RegInfo, const DenseSet<MCRegister> &UsedRegs) {
  if (MI == MBB.end())
    return Exit;

  // The instructions we actually care about are movs onto the stack or special
  // cases of constant-stores to stack
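  // (When optimizing for size, ISel may emit `and $0, mem` or `or $-1, mem`
  // as a smaller encoding of storing 0 or -1, which is why those idioms are
  // accepted below when the immediate matches.)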
  switch (MI->getOpcode()) {
    case X86::AND16mi:
    case X86::AND32mi:
    case X86::AND64mi32: {
      const MachineOperand &ImmOp = MI->getOperand(X86::AddrNumOperands);
      return ImmOp.getImm() == 0 ? Convert : Exit;
    }
    case X86::OR16mi:
    case X86::OR32mi:
    case X86::OR64mi32: {
      const MachineOperand &ImmOp = MI->getOperand(X86::AddrNumOperands);
      return ImmOp.getImm() == -1 ? Convert : Exit;
    }
    case X86::MOV32mi:
    case X86::MOV32mr:
    case X86::MOV64mi32:
    case X86::MOV64mr:
      return Convert;
  }

  // Not all calling conventions have only stack MOVs between the stack
  // adjust and the call.

  // We want to tolerate other instructions, to cover more cases.
  // In particular:
  // a) PCrel calls, where we expect an additional COPY of the basereg.
  // b) Passing frame-index addresses.
  // c) Calling conventions that have inreg parameters. These generate
  //    both copies and movs into registers.
  // To avoid creating lots of special cases, allow any instruction
  // that does not write into memory, does not def or use the stack
  // pointer, and does not def any register that was used by a preceding
  // push.
  // (Reading from memory is allowed, even if referenced through a
  // frame index, since these will get adjusted properly in PEI)

  // The reason for the last condition is that the pushes can't replace
  // the movs in place, because the order must be reversed.
  // So if we have a MOV32mr that uses EDX, then an instruction that defs
  // EDX, and then the call, after the transformation the push will use
  // the modified version of EDX, and not the original one.
  // Since we are still in SSA form at this point, we only need to
  // make sure we don't clobber any *physical* registers that were
  // used by an earlier mov that will become a push.
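  // For example (illustrative):
  //   movl    %edx, 4(%esp)   ; will become a push placed just before the call
  //   xorl    %edx, %edx      ; defs EDX after the store
  //   calll   foo
  // Here the push emitted before the call would read the clobbered EDX rather
  // than the stored value, so a def of a previously-used register classifies
  // as Exit.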

  if (MI->isCall() || MI->mayStore())
    return Exit;

  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg.isPhysical())
      continue;
    if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
      return Exit;
    if (MO.isDef()) {
      for (MCRegister U : UsedRegs)
        if (RegInfo.regsOverlap(Reg, U))
          return Exit;
    }
  }

  return Skip;
}

void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               CallContext &Context) {
  // Check that this particular call sequence is amenable to the
  // transformation.
  const X86RegisterInfo &RegInfo = *STI->getRegisterInfo();

  // We expect to enter this at the beginning of a call sequence
  assert(I->getOpcode() == TII->getCallFrameSetupOpcode());
  MachineBasicBlock::iterator FrameSetup = I++;
  Context.FrameSetup = FrameSetup;

  // How much do we adjust the stack? This puts an upper bound on
  // the number of parameters actually passed on it.
  unsigned int MaxAdjust = TII->getFrameSize(*FrameSetup) >> Log2SlotSize;

  // A zero adjustment means no stack parameters
  if (!MaxAdjust) {
    Context.NoStackParams = true;
    return;
  }

  // Skip over DEBUG_VALUE.
  // For globals in PIC mode, we can have some LEAs here. Skip them as well.
  // TODO: Extend this to something that covers more cases.
  while (I->getOpcode() == X86::LEA32r || I->isDebugInstr())
    ++I;

  Register StackPtr = RegInfo.getStackRegister();
  auto StackPtrCopyInst = MBB.end();
  // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual
  // register. If it's there, use that virtual register as stack pointer
  // instead. Also, we need to locate this instruction so that we can later
  // safely ignore it while doing the conservative processing of the call
  // chain. The COPY can be located anywhere between the call-frame setup
  // instruction and its first use. We use the call instruction as a boundary
  // because it is usually cheaper to check if an instruction is a call than
  // checking if an instruction uses a register.
  for (auto J = I; !J->isCall(); ++J)
    if (J->isCopy() && J->getOperand(0).isReg() && J->getOperand(1).isReg() &&
        J->getOperand(1).getReg() == StackPtr) {
      StackPtrCopyInst = J;
      Context.SPCopy = &*J++;
      StackPtr = Context.SPCopy->getOperand(0).getReg();
      break;
    }

  // Scan the call setup sequence for the pattern we're looking for.
  // We only handle a simple case - a sequence of store instructions that
  // push a sequence of stack-slot-aligned values onto the stack, with
  // no gaps between them.
  if (MaxAdjust > 4)
    Context.ArgStoreVector.resize(MaxAdjust, nullptr);

  DenseSet<MCRegister> UsedRegs;

  for (InstClassification Classification = Skip; Classification != Exit; ++I) {
    // If this is the COPY of the stack pointer, it's ok to ignore.
    if (I == StackPtrCopyInst)
      continue;
    Classification = classifyInstruction(MBB, I, RegInfo, UsedRegs);
    if (Classification != Convert)
      continue;
    // We know the instruction has a supported store opcode.
    // We only want movs of the form:
    // mov imm/reg, k(%StackPtr)
    // If we run into something else, bail.
    // Note that AddrBaseReg may, counter to its name, not be a register,
    // but rather a frame index.
    // TODO: Support the fi case. This should probably work now that we
    // have the infrastructure to track the stack pointer within a call
    // sequence.
    if (!I->getOperand(X86::AddrBaseReg).isReg() ||
        (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
        !I->getOperand(X86::AddrScaleAmt).isImm() ||
        (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
        (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
        (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
        !I->getOperand(X86::AddrDisp).isImm())
      return;

    int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
    assert(StackDisp >= 0 &&
           "Negative stack displacement when passing parameters");

    // We really don't want to consider the unaligned case.
    if (StackDisp & (SlotSize - 1))
      return;
    StackDisp >>= Log2SlotSize;

    assert((size_t)StackDisp < Context.ArgStoreVector.size() &&
           "Function call has more parameters than the stack is adjusted for.");

    // If the same stack slot is being filled twice, something's fishy.
    if (Context.ArgStoreVector[StackDisp] != nullptr)
      return;
    Context.ArgStoreVector[StackDisp] = &*I;

    for (const MachineOperand &MO : I->uses()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg.isPhysical())
        UsedRegs.insert(Reg.asMCReg());
    }
  }

  --I;

  // We now expect the end of the sequence. If we stopped early,
  // or reached the end of the block without finding a call, bail.
  if (I == MBB.end() || !I->isCall())
    return;

  Context.Call = &*I;
  if ((++I)->getOpcode() != TII->getCallFrameDestroyOpcode())
    return;

  // Now, go through the vector, and see that we don't have any gaps,
  // but only a series of storing instructions.
  auto MMI = Context.ArgStoreVector.begin(), MME = Context.ArgStoreVector.end();
  for (; MMI != MME; ++MMI, Context.ExpectedDist += SlotSize)
    if (*MMI == nullptr)
      break;

  // If the call had no parameters, do nothing
  if (MMI == Context.ArgStoreVector.begin())
    return;
  // We are either at the last parameter, or at a gap.
  // Make sure it's not a gap.
  for (; MMI != MME; ++MMI)
    if (*MMI != nullptr)
      return;

  Context.UsePush = true;
}

void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
                                                  const CallContext &Context) {
  // Ok, we can in fact do the transformation for this call.
  // Do not remove the FrameSetup instruction, but adjust the parameters.
  // PEI will end up finalizing the handling of this.
  MachineBasicBlock::iterator FrameSetup = Context.FrameSetup;
  MachineBasicBlock &MBB = *(FrameSetup->getParent());
  TII->setFrameAdjustment(*FrameSetup, Context.ExpectedDist);

  const DebugLoc &DL = FrameSetup->getDebugLoc();
  bool Is64Bit = STI->is64Bit();
  // Now, iterate through the vector in reverse order, and replace the stores
  // to the stack with pushes. MOVmi/MOVmr instructions have no defs, so no
  // need to replace uses.
  for (int Idx = (Context.ExpectedDist >> Log2SlotSize) - 1; Idx >= 0; --Idx) {
    MachineBasicBlock::iterator Store = *Context.ArgStoreVector[Idx];
    const MachineOperand &PushOp = Store->getOperand(X86::AddrNumOperands);
    MachineBasicBlock::iterator Push = nullptr;
    unsigned PushOpcode;
    switch (Store->getOpcode()) {
    default:
      llvm_unreachable("Unexpected Opcode!");
    case X86::AND16mi:
    case X86::AND32mi:
    case X86::AND64mi32:
    case X86::OR16mi:
    case X86::OR32mi:
    case X86::OR64mi32:
    case X86::MOV32mi:
    case X86::MOV64mi32:
      PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSH32i;
      Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).add(PushOp);
      Push->cloneMemRefs(MF, *Store);
      break;
    case X86::MOV32mr:
    case X86::MOV64mr: {
      Register Reg = PushOp.getReg();

      // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg
      // in preparation for the PUSH64. The upper 32 bits can be undef.
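      // The sequence built below looks roughly like this (illustrative MIR):
      //   %undef:gr64 = IMPLICIT_DEF
      //   %wide:gr64 = INSERT_SUBREG %undef:gr64, %src:gr32, %subreg.sub_32bit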
      if (Is64Bit && Store->getOpcode() == X86::MOV32mr) {
        Register UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass);
        Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
            .addReg(UndefReg)
            .add(PushOp)
            .addImm(X86::sub_32bit);
      }

      // If PUSHrmm is not slow on this target, try to fold the source of the
      // push into the instruction.
      bool SlowPUSHrmm = STI->slowTwoMemOps();

      // Check that this is legal to fold. Right now, we're extremely
      // conservative about that.
      MachineInstr *DefMov = nullptr;
      if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
        PushOpcode = Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode));

        unsigned NumOps = DefMov->getDesc().getNumOperands();
        for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
          Push->addOperand(DefMov->getOperand(i));
        Push->cloneMergedMemRefs(MF, {DefMov, &*Store});
        DefMov->eraseFromParent();
      } else {
        PushOpcode = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
                   .addReg(Reg)
                   .getInstr();
        Push->cloneMemRefs(MF, *Store);
      }
      break;
    }
    }

    // For debugging, when using SP-based CFA, we need to adjust the CFA
    // offset after each push.
    // TODO: This is needed only if we require precise CFA.
    if (!TFL->hasFP(MF))
      TFL->BuildCFI(
          MBB, std::next(Push), DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, SlotSize));
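    // In the final assembly this materializes as a `.cfi_adjust_cfa_offset`
    // directive after each push, keeping the SP-based CFA accurate.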

    MBB.erase(Store);
  }

  // The stack-pointer copy is no longer used in the call sequences.
  // There should not be any other users, but we can't commit to that, so:
  if (Context.SPCopy && MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
    Context.SPCopy->eraseFromParent();

  // Once we've done this, we need to make sure PEI doesn't assume a reserved
  // frame.
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setHasPushSequences(true);
}

MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
    MachineBasicBlock::iterator FrameSetup, Register Reg) {
  // Do an extremely restricted form of load folding.
  // ISel will often create patterns like:
  // movl    4(%edi), %eax
  // movl    8(%edi), %ecx
  // movl    12(%edi), %edx
  // movl    %edx, 8(%esp)
  // movl    %ecx, 4(%esp)
  // movl    %eax, (%esp)
  // call
  // Get rid of those with prejudice.
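  // After folding, the loads feed memory-operand pushes instead
  // (illustrative):
  //   pushl   12(%edi)
  //   pushl   8(%edi)
  //   pushl   4(%edi)
  //   call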
  if (!Reg.isVirtual())
    return nullptr;

  // Make sure this is the only use of Reg.
  if (!MRI->hasOneNonDBGUse(Reg))
    return nullptr;

  MachineInstr &DefMI = *MRI->getVRegDef(Reg);

  // Make sure the def is a MOV from memory.
  // If the def is in another block, give up.
  if ((DefMI.getOpcode() != X86::MOV32rm &&
       DefMI.getOpcode() != X86::MOV64rm) ||
      DefMI.getParent() != FrameSetup->getParent())
    return nullptr;

  // Make sure we don't have any instructions between DefMI and the
  // push that make folding the load illegal.
  for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
    if (I->isLoadFoldBarrier())
      return nullptr;

  return &DefMI;
}

FunctionPass *llvm::createX86CallFrameOptimization() {
  return new X86CallFrameOptimization();
}