1 //===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the Base ARM implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "ARMBaseInstrInfo.h"
14 #include "ARMBaseRegisterInfo.h"
15 #include "ARMConstantPoolValue.h"
16 #include "ARMFeatures.h"
17 #include "ARMHazardRecognizer.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMSubtarget.h"
20 #include "MCTargetDesc/ARMAddressingModes.h"
21 #include "MCTargetDesc/ARMBaseInfo.h"
22 #include "MVETailPredUtils.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/Triple.h"
28 #include "llvm/CodeGen/LiveVariables.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineConstantPool.h"
31 #include "llvm/CodeGen/MachineFrameInfo.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineInstr.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineMemOperand.h"
36 #include "llvm/CodeGen/MachineModuleInfo.h"
37 #include "llvm/CodeGen/MachineOperand.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/MachineScheduler.h"
40 #include "llvm/CodeGen/MultiHazardRecognizer.h"
41 #include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
42 #include "llvm/CodeGen/SelectionDAGNodes.h"
43 #include "llvm/CodeGen/TargetInstrInfo.h"
44 #include "llvm/CodeGen/TargetRegisterInfo.h"
45 #include "llvm/CodeGen/TargetSchedule.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/Constants.h"
48 #include "llvm/IR/DebugLoc.h"
49 #include "llvm/IR/Function.h"
50 #include "llvm/IR/GlobalValue.h"
51 #include "llvm/MC/MCAsmInfo.h"
52 #include "llvm/MC/MCInstrDesc.h"
53 #include "llvm/MC/MCInstrItineraries.h"
54 #include "llvm/Support/BranchProbability.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/raw_ostream.h"
61 #include "llvm/Target/TargetMachine.h"
62 #include <algorithm>
63 #include <cassert>
64 #include <cstdint>
65 #include <iterator>
66 #include <new>
67 #include <utility>
68 #include <vector>
69 
70 using namespace llvm;
71 
72 #define DEBUG_TYPE "arm-instrinfo"
73 
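// Pull in the TableGen-generated ARMGenInstrInfo constructor and destructor
// definitions.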
74 #define GET_INSTRINFO_CTOR_DTOR
75 #include "ARMGenInstrInfo.inc"
76 
77 static cl::opt<bool>
78 EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
79                cl::desc("Enable ARM 2-addr to 3-addr conv"));
80 
81 /// ARM_MLxEntry - Record information about MLA / MLS instructions.
82 struct ARM_MLxEntry {
83   uint16_t MLxOpc;     // MLA / MLS opcode
84   uint16_t MulOpc;     // Expanded multiplication opcode
85   uint16_t AddSubOpc;  // Expanded add / sub opcode
86   bool NegAcc;         // True if the acc is negated before the add / sub.
87   bool HasLane;        // True if instruction has an extra "lane" operand.
88 };
89 
90 static const ARM_MLxEntry ARM_MLxTable[] = {
91   // MLxOpc,          MulOpc,           AddSubOpc,       NegAcc, HasLane
92   // fp scalar ops
93   { ARM::VMLAS,       ARM::VMULS,       ARM::VADDS,      false,  false },
94   { ARM::VMLSS,       ARM::VMULS,       ARM::VSUBS,      false,  false },
95   { ARM::VMLAD,       ARM::VMULD,       ARM::VADDD,      false,  false },
96   { ARM::VMLSD,       ARM::VMULD,       ARM::VSUBD,      false,  false },
97   { ARM::VNMLAS,      ARM::VNMULS,      ARM::VSUBS,      true,   false },
98   { ARM::VNMLSS,      ARM::VMULS,       ARM::VSUBS,      true,   false },
99   { ARM::VNMLAD,      ARM::VNMULD,      ARM::VSUBD,      true,   false },
100   { ARM::VNMLSD,      ARM::VMULD,       ARM::VSUBD,      true,   false },
101 
102   // fp SIMD ops
103   { ARM::VMLAfd,      ARM::VMULfd,      ARM::VADDfd,     false,  false },
104   { ARM::VMLSfd,      ARM::VMULfd,      ARM::VSUBfd,     false,  false },
105   { ARM::VMLAfq,      ARM::VMULfq,      ARM::VADDfq,     false,  false },
106   { ARM::VMLSfq,      ARM::VMULfq,      ARM::VSUBfq,     false,  false },
107   { ARM::VMLAslfd,    ARM::VMULslfd,    ARM::VADDfd,     false,  true  },
108   { ARM::VMLSslfd,    ARM::VMULslfd,    ARM::VSUBfd,     false,  true  },
109   { ARM::VMLAslfq,    ARM::VMULslfq,    ARM::VADDfq,     false,  true  },
110   { ARM::VMLSslfq,    ARM::VMULslfq,    ARM::VSUBfq,     false,  true  },
111 };
112 
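// The constructor uses this table to populate MLxEntryMap (MLx opcode -> table
// index) and MLxHazardOpcodes, so that fused multiply-accumulate instructions
// can be looked up and, when needed, split into their separate multiply and
// add/sub parts.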
113 ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
114   : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
115     Subtarget(STI) {
116   for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
117     if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
118       llvm_unreachable("Duplicated entries?");
119     MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
120     MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
121   }
122 }
123 
124 // Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrInfo
125 // currently defaults to no prepass hazard recognizer.
126 ScheduleHazardRecognizer *
127 ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
128                                                const ScheduleDAG *DAG) const {
129   if (usePreRAHazardRecognizer()) {
130     const InstrItineraryData *II =
131         static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
132     return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
133   }
134   return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
135 }
136 
137 // Called during:
138 // - pre-RA scheduling
139 // - post-RA scheduling when FeatureUseMISched is set
140 ScheduleHazardRecognizer *ARMBaseInstrInfo::CreateTargetMIHazardRecognizer(
141     const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
142   MultiHazardRecognizer *MHR = new MultiHazardRecognizer();
143 
144   // We would like to restrict this hazard recognizer to only
145   // post-RA scheduling; we can tell that we're post-RA because we don't
146   // track VRegLiveness.
147   // Cortex-M7: TRM indicates that there is a single ITCM bank and two DTCM
148   //            banks, selected by address bit 2.  Assume that TCMs are in use.
149   if (Subtarget.isCortexM7() && !DAG->hasVRegLiveness())
150     MHR->AddHazardRecognizer(
151         std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4, true));
152 
153   // Not inserting ARMHazardRecognizerFPMLx because that would change
154   // legacy behavior
155 
156   auto BHR = TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG);
157   MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
158   return MHR;
159 }
160 
161 // Called during post-RA scheduling when FeatureUseMISched is not set
162 ScheduleHazardRecognizer *ARMBaseInstrInfo::
163 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
164                                    const ScheduleDAG *DAG) const {
165   MultiHazardRecognizer *MHR = new MultiHazardRecognizer();
166 
167   if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
168     MHR->AddHazardRecognizer(std::make_unique<ARMHazardRecognizerFPMLx>());
169 
170   auto BHR = TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
171   if (BHR)
172     MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
173   return MHR;
174 }
175 
176 MachineInstr *
177 ARMBaseInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
178                                         LiveIntervals *LIS) const {
179   // FIXME: Thumb2 support.
180 
181   if (!EnableARM3Addr)
182     return nullptr;
183 
184   MachineFunction &MF = *MI.getParent()->getParent();
185   uint64_t TSFlags = MI.getDesc().TSFlags;
186   bool isPre = false;
187   switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
188   default: return nullptr;
189   case ARMII::IndexModePre:
190     isPre = true;
191     break;
192   case ARMII::IndexModePost:
193     break;
194   }
195 
196   // Try splitting an indexed load/store to an un-indexed one plus an add/sub
197   // operation.
198   unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
199   if (MemOpc == 0)
200     return nullptr;
201 
202   MachineInstr *UpdateMI = nullptr;
203   MachineInstr *MemMI = nullptr;
204   unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
205   const MCInstrDesc &MCID = MI.getDesc();
206   unsigned NumOps = MCID.getNumOperands();
207   bool isLoad = !MI.mayStore();
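  // Operand layout assumed for indexed loads/stores: the write-back register is
  // operand 0 for stores and operand 1 for loads, the base is operand 2, and the
  // last three operands are the offset register, offset immediate and predicate.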
208   const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
209   const MachineOperand &Base = MI.getOperand(2);
210   const MachineOperand &Offset = MI.getOperand(NumOps - 3);
211   Register WBReg = WB.getReg();
212   Register BaseReg = Base.getReg();
213   Register OffReg = Offset.getReg();
214   unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
215   ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
216   switch (AddrMode) {
217   default: llvm_unreachable("Unknown indexed op!");
218   case ARMII::AddrMode2: {
219     bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
220     unsigned Amt = ARM_AM::getAM2Offset(OffImm);
221     if (OffReg == 0) {
222       if (ARM_AM::getSOImmVal(Amt) == -1)
223         // Can't encode it in a so_imm operand. This transformation will
224         // add more than 1 instruction. Abandon!
225         return nullptr;
226       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
227                          get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
228                      .addReg(BaseReg)
229                      .addImm(Amt)
230                      .add(predOps(Pred))
231                      .add(condCodeOp());
232     } else if (Amt != 0) {
233       ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
234       unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
235       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
236                          get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
237                      .addReg(BaseReg)
238                      .addReg(OffReg)
239                      .addReg(0)
240                      .addImm(SOOpc)
241                      .add(predOps(Pred))
242                      .add(condCodeOp());
243     } else
244       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
245                          get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
246                      .addReg(BaseReg)
247                      .addReg(OffReg)
248                      .add(predOps(Pred))
249                      .add(condCodeOp());
250     break;
251   }
252   case ARMII::AddrMode3 : {
253     bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
254     unsigned Amt = ARM_AM::getAM3Offset(OffImm);
255     if (OffReg == 0)
256       // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
257       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
258                          get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
259                      .addReg(BaseReg)
260                      .addImm(Amt)
261                      .add(predOps(Pred))
262                      .add(condCodeOp());
263     else
264       UpdateMI = BuildMI(MF, MI.getDebugLoc(),
265                          get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
266                      .addReg(BaseReg)
267                      .addReg(OffReg)
268                      .add(predOps(Pred))
269                      .add(condCodeOp());
270     break;
271   }
272   }
273 
274   std::vector<MachineInstr*> NewMIs;
275   if (isPre) {
276     if (isLoad)
277       MemMI =
278           BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
279               .addReg(WBReg)
280               .addImm(0)
281               .addImm(Pred);
282     else
283       MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
284                   .addReg(MI.getOperand(1).getReg())
285                   .addReg(WBReg)
286                   .addReg(0)
287                   .addImm(0)
288                   .addImm(Pred);
289     NewMIs.push_back(MemMI);
290     NewMIs.push_back(UpdateMI);
291   } else {
292     if (isLoad)
293       MemMI =
294           BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
295               .addReg(BaseReg)
296               .addImm(0)
297               .addImm(Pred);
298     else
299       MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
300                   .addReg(MI.getOperand(1).getReg())
301                   .addReg(BaseReg)
302                   .addReg(0)
303                   .addImm(0)
304                   .addImm(Pred);
305     if (WB.isDead())
306       UpdateMI->getOperand(0).setIsDead();
307     NewMIs.push_back(UpdateMI);
308     NewMIs.push_back(MemMI);
309   }
310 
311   // Transfer LiveVariables states, kill / dead info.
312   if (LV) {
313     for (const MachineOperand &MO : MI.operands()) {
314       if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
315         Register Reg = MO.getReg();
316 
317         LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
318         if (MO.isDef()) {
319           MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
320           if (MO.isDead())
321             LV->addVirtualRegisterDead(Reg, *NewMI);
322         }
323         if (MO.isUse() && MO.isKill()) {
324           for (unsigned j = 0; j < 2; ++j) {
325             // Look at the two new MI's in reverse order.
326             MachineInstr *NewMI = NewMIs[j];
327             if (!NewMI->readsRegister(Reg))
328               continue;
329             LV->addVirtualRegisterKilled(Reg, *NewMI);
330             if (VI.removeKill(MI))
331               VI.Kills.push_back(NewMI);
332             break;
333           }
334         }
335       }
336     }
337   }
338 
339   MachineBasicBlock &MBB = *MI.getParent();
340   MBB.insert(MI, NewMIs[1]);
341   MBB.insert(MI, NewMIs[0]);
342   return NewMIs[0];
343 }
344 
345 // Branch analysis.
346 bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
347                                      MachineBasicBlock *&TBB,
348                                      MachineBasicBlock *&FBB,
349                                      SmallVectorImpl<MachineOperand> &Cond,
350                                      bool AllowModify) const {
351   TBB = nullptr;
352   FBB = nullptr;
353 
354   MachineBasicBlock::instr_iterator I = MBB.instr_end();
355   if (I == MBB.instr_begin())
356     return false; // Empty blocks are easy.
357   --I;
358 
359   // Walk backwards from the end of the basic block until the branch is
360   // analyzed or we give up.
361   while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
362     // Flag to be raised on unanalyzable instructions. This is useful in cases
363     // where we want to clean up on the end of the basic block before we bail
364     // out.
365     bool CantAnalyze = false;
366 
367     // Skip over DEBUG values, predicated nonterminators and speculation
368     // barrier terminators.
369     while (I->isDebugInstr() || !I->isTerminator() ||
370            isSpeculationBarrierEndBBOpcode(I->getOpcode()) ||
371            I->getOpcode() == ARM::t2DoLoopStartTP){
372       if (I == MBB.instr_begin())
373         return false;
374       --I;
375     }
376 
377     if (isIndirectBranchOpcode(I->getOpcode()) ||
378         isJumpTableBranchOpcode(I->getOpcode())) {
379       // Indirect branches and jump tables can't be analyzed, but we still want
380       // to clean up any instructions at the tail of the basic block.
381       CantAnalyze = true;
382     } else if (isUncondBranchOpcode(I->getOpcode())) {
383       TBB = I->getOperand(0).getMBB();
384     } else if (isCondBranchOpcode(I->getOpcode())) {
385       // Bail out if we encounter multiple conditional branches.
386       if (!Cond.empty())
387         return true;
388 
389       assert(!FBB && "FBB should have been null.");
390       FBB = TBB;
391       TBB = I->getOperand(0).getMBB();
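      // Cond[0] is the ARMCC condition-code immediate and Cond[1] the CPSR
      // register operand; insertBranch and PredicateInstruction rely on this
      // two-operand form.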
392       Cond.push_back(I->getOperand(1));
393       Cond.push_back(I->getOperand(2));
394     } else if (I->isReturn()) {
395       // Returns can't be analyzed, but we should run cleanup.
396       CantAnalyze = true;
397     } else {
398       // We encountered some other unrecognized terminator. Bail out immediately.
399       return true;
400     }
401 
402     // Cleanup code - to be run for unpredicated unconditional branches and
403     //                returns.
404     if (!isPredicated(*I) &&
405           (isUncondBranchOpcode(I->getOpcode()) ||
406            isIndirectBranchOpcode(I->getOpcode()) ||
407            isJumpTableBranchOpcode(I->getOpcode()) ||
408            I->isReturn())) {
409       // Forget any previous conditional branch information - it no longer applies.
410       Cond.clear();
411       FBB = nullptr;
412 
413       // If we can modify the function, delete everything below this
414       // unconditional branch.
415       if (AllowModify) {
416         MachineBasicBlock::iterator DI = std::next(I);
417         while (DI != MBB.instr_end()) {
418           MachineInstr &InstToDelete = *DI;
419           ++DI;
420           // Speculation barriers must not be deleted.
421           if (isSpeculationBarrierEndBBOpcode(InstToDelete.getOpcode()))
422             continue;
423           InstToDelete.eraseFromParent();
424         }
425       }
426     }
427 
428     if (CantAnalyze) {
429       // We may not be able to analyze the block, but we could still have
430       // an unconditional branch as the last instruction in the block, which
431       // just branches to the layout successor. If this is the case, then just
432       // remove it if we're allowed to make modifications.
433       if (AllowModify && !isPredicated(MBB.back()) &&
434           isUncondBranchOpcode(MBB.back().getOpcode()) &&
435           TBB && MBB.isLayoutSuccessor(TBB))
436         removeBranch(MBB);
437       return true;
438     }
439 
440     if (I == MBB.instr_begin())
441       return false;
442 
443     --I;
444   }
445 
446   // We made it past the terminators without bailing out - we must have
447   // analyzed this branch successfully.
448   return false;
449 }
450 
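// Remove the trailing branch from MBB, plus a conditional branch just above it
// if present, and return the number of branches removed (0, 1 or 2).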
451 unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
452                                         int *BytesRemoved) const {
453   assert(!BytesRemoved && "code size not handled");
454 
455   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
456   if (I == MBB.end())
457     return 0;
458 
459   if (!isUncondBranchOpcode(I->getOpcode()) &&
460       !isCondBranchOpcode(I->getOpcode()))
461     return 0;
462 
463   // Remove the branch.
464   I->eraseFromParent();
465 
466   I = MBB.end();
467 
468   if (I == MBB.begin()) return 1;
469   --I;
470   if (!isCondBranchOpcode(I->getOpcode()))
471     return 1;
472 
473   // Remove the branch.
474   I->eraseFromParent();
475   return 2;
476 }
477 
478 unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
479                                         MachineBasicBlock *TBB,
480                                         MachineBasicBlock *FBB,
481                                         ArrayRef<MachineOperand> Cond,
482                                         const DebugLoc &DL,
483                                         int *BytesAdded) const {
484   assert(!BytesAdded && "code size not handled");
485   ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
486   int BOpc   = !AFI->isThumbFunction()
487     ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
488   int BccOpc = !AFI->isThumbFunction()
489     ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
490   bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();
491 
492   // Shouldn't be a fall through.
493   assert(TBB && "insertBranch must not be told to insert a fallthrough");
494   assert((Cond.size() == 2 || Cond.size() == 0) &&
495          "ARM branch conditions have two components!");
496 
497   // For conditional branches, we add Cond[1] (the CPSR operand) directly so its flags are preserved.
498 
499   if (!FBB) {
500     if (Cond.empty()) { // Unconditional branch?
501       if (isThumb)
502         BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).add(predOps(ARMCC::AL));
503       else
504         BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
505     } else
506       BuildMI(&MBB, DL, get(BccOpc))
507           .addMBB(TBB)
508           .addImm(Cond[0].getImm())
509           .add(Cond[1]);
510     return 1;
511   }
512 
513   // Two-way conditional branch.
514   BuildMI(&MBB, DL, get(BccOpc))
515       .addMBB(TBB)
516       .addImm(Cond[0].getImm())
517       .add(Cond[1]);
518   if (isThumb)
519     BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).add(predOps(ARMCC::AL));
520   else
521     BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
522   return 2;
523 }
524 
525 bool ARMBaseInstrInfo::
526 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
527   ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
528   Cond[0].setImm(ARMCC::getOppositeCondition(CC));
529   return false;
530 }
531 
532 bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
533   if (MI.isBundle()) {
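    // A bundle counts as predicated if any instruction inside it carries a
    // non-AL predicate.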
534     MachineBasicBlock::const_instr_iterator I = MI.getIterator();
535     MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
536     while (++I != E && I->isInsideBundle()) {
537       int PIdx = I->findFirstPredOperandIdx();
538       if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
539         return true;
540     }
541     return false;
542   }
543 
544   int PIdx = MI.findFirstPredOperandIdx();
545   return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
546 }
547 
548 std::string ARMBaseInstrInfo::createMIROperandComment(
549     const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
550     const TargetRegisterInfo *TRI) const {
551 
552   // First, let's see if there is a generic comment for this operand
553   std::string GenericComment =
554       TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
555   if (!GenericComment.empty())
556     return GenericComment;
557 
558   // If not, check if we have an immediate operand.
559   if (Op.getType() != MachineOperand::MO_Immediate)
560     return std::string();
561 
562   // And print its corresponding condition code if the immediate is a
563   // predicate.
564   int FirstPredOp = MI.findFirstPredOperandIdx();
565   if (FirstPredOp != (int) OpIdx)
566     return std::string();
567 
568   std::string CC = "CC::";
569   CC += ARMCondCodeToString((ARMCC::CondCodes)Op.getImm());
570   return CC;
571 }
572 
573 bool ARMBaseInstrInfo::PredicateInstruction(
574     MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
575   unsigned Opc = MI.getOpcode();
576   if (isUncondBranchOpcode(Opc)) {
577     MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
578     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
579       .addImm(Pred[0].getImm())
580       .addReg(Pred[1].getReg());
581     return true;
582   }
583 
584   int PIdx = MI.findFirstPredOperandIdx();
585   if (PIdx != -1) {
586     MachineOperand &PMO = MI.getOperand(PIdx);
587     PMO.setImm(Pred[0].getImm());
588     MI.getOperand(PIdx+1).setReg(Pred[1].getReg());
589 
590     // Thumb 1 arithmetic instructions do not set CPSR when executed inside an
591     // IT block. This affects how they are printed.
592     const MCInstrDesc &MCID = MI.getDesc();
593     if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
594       assert(MCID.OpInfo[1].isOptionalDef() && "CPSR def isn't expected operand");
595       assert((MI.getOperand(1).isDead() ||
596               MI.getOperand(1).getReg() != ARM::CPSR) &&
597              "if conversion tried to stop defining used CPSR");
598       MI.getOperand(1).setReg(ARM::NoRegister);
599     }
600 
601     return true;
602   }
603   return false;
604 }
605 
606 bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
607                                          ArrayRef<MachineOperand> Pred2) const {
608   if (Pred1.size() > 2 || Pred2.size() > 2)
609     return false;
610 
611   ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
612   ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
613   if (CC1 == CC2)
614     return true;
615 
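  // CC1 subsumes CC2 when every state that satisfies CC2 also satisfies CC1,
  // e.g. HS (unsigned >=) holds whenever HI (unsigned >) does.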
616   switch (CC1) {
617   default:
618     return false;
619   case ARMCC::AL:
620     return true;
621   case ARMCC::HS:
622     return CC2 == ARMCC::HI;
623   case ARMCC::LS:
624     return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
625   case ARMCC::GE:
626     return CC2 == ARMCC::GT;
627   case ARMCC::LE:
628     return CC2 == ARMCC::LT;
629   }
630 }
631 
632 bool ARMBaseInstrInfo::ClobbersPredicate(MachineInstr &MI,
633                                          std::vector<MachineOperand> &Pred,
634                                          bool SkipDead) const {
635   bool Found = false;
636   for (const MachineOperand &MO : MI.operands()) {
637     bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
638     bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
639     if (ClobbersCPSR || IsCPSR) {
640 
641       // Filter out T1 instructions that have a dead CPSR,
642       // allowing IT blocks to be generated containing T1 instructions
643       const MCInstrDesc &MCID = MI.getDesc();
644       if (MCID.TSFlags & ARMII::ThumbArithFlagSetting && MO.isDead() &&
645           SkipDead)
646         continue;
647 
648       Pred.push_back(MO);
649       Found = true;
650     }
651   }
652 
653   return Found;
654 }
655 
656 bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {
657   for (const auto &MO : MI.operands())
658     if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
659       return true;
660   return false;
661 }
662 
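// The Thumb1 ALU opcodes listed below set CPSR only when executed outside an IT
// block, so they are eligible for an IT block as long as their CPSR def is dead.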
663 static bool isEligibleForITBlock(const MachineInstr *MI) {
664   switch (MI->getOpcode()) {
665   default: return true;
666   case ARM::tADC:   // ADC (register) T1
667   case ARM::tADDi3: // ADD (immediate) T1
668   case ARM::tADDi8: // ADD (immediate) T2
669   case ARM::tADDrr: // ADD (register) T1
670   case ARM::tAND:   // AND (register) T1
671   case ARM::tASRri: // ASR (immediate) T1
672   case ARM::tASRrr: // ASR (register) T1
673   case ARM::tBIC:   // BIC (register) T1
674   case ARM::tEOR:   // EOR (register) T1
675   case ARM::tLSLri: // LSL (immediate) T1
676   case ARM::tLSLrr: // LSL (register) T1
677   case ARM::tLSRri: // LSR (immediate) T1
678   case ARM::tLSRrr: // LSR (register) T1
679   case ARM::tMUL:   // MUL T1
680   case ARM::tMVN:   // MVN (register) T1
681   case ARM::tORR:   // ORR (register) T1
682   case ARM::tROR:   // ROR (register) T1
683   case ARM::tRSB:   // RSB (immediate) T1
684   case ARM::tSBC:   // SBC (register) T1
685   case ARM::tSUBi3: // SUB (immediate) T1
686   case ARM::tSUBi8: // SUB (immediate) T2
687   case ARM::tSUBrr: // SUB (register) T1
688     return !ARMBaseInstrInfo::isCPSRDefined(*MI);
689   }
690 }
691 
692 /// isPredicable - Return true if the specified instruction can be predicated.
693 /// By default, this returns true for every instruction with a
694 /// PredicateOperand.
695 bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
696   if (!MI.isPredicable())
697     return false;
698 
699   if (MI.isBundle())
700     return false;
701 
702   if (!isEligibleForITBlock(&MI))
703     return false;
704 
705   const MachineFunction *MF = MI.getParent()->getParent();
706   const ARMFunctionInfo *AFI =
707       MF->getInfo<ARMFunctionInfo>();
708 
709   // Neon instructions in Thumb2 IT blocks are deprecated, see ARMARM.
710   // In their ARM encoding, they can't be encoded in a conditional form.
711   if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
712     return false;
713 
714   // Make indirect control flow changes unpredicable when SLS mitigation is
715   // enabled.
716   const ARMSubtarget &ST = MF->getSubtarget<ARMSubtarget>();
717   if (ST.hardenSlsRetBr() && isIndirectControlFlowNotComingBack(MI))
718     return false;
719   if (ST.hardenSlsBlr() && isIndirectCall(MI))
720     return false;
721 
722   if (AFI->isThumb2Function()) {
723     if (getSubtarget().restrictIT())
724       return isV8EligibleForIT(&MI);
725   }
726 
727   return true;
728 }
729 
730 namespace llvm {
731 
732 template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {
733   for (const MachineOperand &MO : MI->operands()) {
734     if (!MO.isReg() || MO.isUndef() || MO.isUse())
735       continue;
736     if (MO.getReg() != ARM::CPSR)
737       continue;
738     if (!MO.isDead())
739       return false;
740   }
741   // all definitions of CPSR are dead
742   return true;
743 }
744 
745 } // end namespace llvm
746 
747 /// GetInstSize - Return the size of the specified MachineInstr.
748 ///
749 unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
750   const MachineBasicBlock &MBB = *MI.getParent();
751   const MachineFunction *MF = MBB.getParent();
752   const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
753 
754   const MCInstrDesc &MCID = MI.getDesc();
755 
756   switch (MI.getOpcode()) {
757   default:
758     // Return the size specified in .td file. If there's none, return 0, as we
759     // can't define a default size (Thumb1 instructions are 2 bytes, Thumb2
760     // instructions are 2-4 bytes, and ARM instructions are 4 bytes), in
761     // contrast to AArch64 instructions which have a default size of 4 bytes for
762     // example.
763     return MCID.getSize();
764   case TargetOpcode::BUNDLE:
765     return getInstBundleLength(MI);
766   case ARM::CONSTPOOL_ENTRY:
767   case ARM::JUMPTABLE_INSTS:
768   case ARM::JUMPTABLE_ADDRS:
769   case ARM::JUMPTABLE_TBB:
770   case ARM::JUMPTABLE_TBH:
771     // If this machine instr is a constant pool entry or one of the jump table
772     // placeholders, its size is recorded as operand #2.
773     return MI.getOperand(2).getImm();
774   case ARM::SPACE:
775     return MI.getOperand(1).getImm();
776   case ARM::INLINEASM:
777   case ARM::INLINEASM_BR: {
778     // If this machine instr is an inline asm, measure it.
779     unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
780     if (!MF->getInfo<ARMFunctionInfo>()->isThumbFunction())
781       Size = alignTo(Size, 4);
782     return Size;
783   }
784   }
785 }
786 
787 unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
788   unsigned Size = 0;
789   MachineBasicBlock::const_instr_iterator I = MI.getIterator();
790   MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
791   while (++I != E && I->isInsideBundle()) {
792     assert(!I->isBundle() && "No nested bundle!");
793     Size += getInstSizeInBytes(*I);
794   }
795   return Size;
796 }
797 
798 void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
799                                     MachineBasicBlock::iterator I,
800                                     unsigned DestReg, bool KillSrc,
801                                     const ARMSubtarget &Subtarget) const {
802   unsigned Opc = Subtarget.isThumb()
803                      ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
804                      : ARM::MRS;
805 
806   MachineInstrBuilder MIB =
807       BuildMI(MBB, I, I->getDebugLoc(), get(Opc), DestReg);
808 
809   // There is only 1 A/R class MRS instruction, and it always refers to
810   // APSR. However, there are lots of other possibilities on M-class cores.
811   if (Subtarget.isMClass())
812     MIB.addImm(0x800);
813 
814   MIB.add(predOps(ARMCC::AL))
815      .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
816 }
817 
818 void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
819                                   MachineBasicBlock::iterator I,
820                                   unsigned SrcReg, bool KillSrc,
821                                   const ARMSubtarget &Subtarget) const {
822   unsigned Opc = Subtarget.isThumb()
823                      ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
824                      : ARM::MSR;
825 
826   MachineInstrBuilder MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Opc));
827 
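  // On M-class cores the 0x800 operand selects APSR_nzcvq; on A/R-class cores
  // the mask of 8 selects the flags (f) field of CPSR.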
828   if (Subtarget.isMClass())
829     MIB.addImm(0x800);
830   else
831     MIB.addImm(8);
832 
833   MIB.addReg(SrcReg, getKillRegState(KillSrc))
834      .add(predOps(ARMCC::AL))
835      .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
836 }
837 
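// The helpers below append the MVE vpred_n / vpred_r operand sequence: a VCC
// condition code, a VPR predicate register (0 when unpredicated), a
// tail-predication register, and, for the _r forms, the register that supplies
// the inactive lanes.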
838 void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) {
839   MIB.addImm(ARMVCC::None);
840   MIB.addReg(0);
841   MIB.addReg(0); // tp_reg
842 }
843 
844 void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB,
845                                       Register DestReg) {
846   addUnpredicatedMveVpredNOp(MIB);
847   MIB.addReg(DestReg, RegState::Undef);
848 }
849 
850 void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) {
851   MIB.addImm(Cond);
852   MIB.addReg(ARM::VPR, RegState::Implicit);
853   MIB.addReg(0); // tp_reg
854 }
855 
856 void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB,
857                                     unsigned Cond, unsigned Inactive) {
858   addPredicatedMveVpredNOp(MIB, Cond);
859   MIB.addReg(Inactive);
860 }
861 
862 void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
863                                    MachineBasicBlock::iterator I,
864                                    const DebugLoc &DL, MCRegister DestReg,
865                                    MCRegister SrcReg, bool KillSrc) const {
866   bool GPRDest = ARM::GPRRegClass.contains(DestReg);
867   bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
868 
869   if (GPRDest && GPRSrc) {
870     BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
871         .addReg(SrcReg, getKillRegState(KillSrc))
872         .add(predOps(ARMCC::AL))
873         .add(condCodeOp());
874     return;
875   }
876 
877   bool SPRDest = ARM::SPRRegClass.contains(DestReg);
878   bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
879 
880   unsigned Opc = 0;
881   if (SPRDest && SPRSrc)
882     Opc = ARM::VMOVS;
883   else if (GPRDest && SPRSrc)
884     Opc = ARM::VMOVRS;
885   else if (SPRDest && GPRSrc)
886     Opc = ARM::VMOVSR;
887   else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64())
888     Opc = ARM::VMOVD;
889   else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
890     Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
891 
892   if (Opc) {
893     MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
894     MIB.addReg(SrcReg, getKillRegState(KillSrc));
895     if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
896       MIB.addReg(SrcReg, getKillRegState(KillSrc));
897     if (Opc == ARM::MVE_VORR)
898       addUnpredicatedMveVpredROp(MIB, DestReg);
899     else if (Opc != ARM::MQPRCopy)
900       MIB.add(predOps(ARMCC::AL));
901     return;
902   }
903 
904   // Handle register classes that require multiple instructions.
905   unsigned BeginIdx = 0;
906   unsigned SubRegs = 0;
907   int Spacing = 1;
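  // BeginIdx is the first sub-register index to copy, SubRegs the number of
  // moves to emit, and Spacing the stride between sub-register indices (2 for
  // the spaced D-register tuples).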
908 
909   // Use VORRq when possible.
910   if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
911     Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
912     BeginIdx = ARM::qsub_0;
913     SubRegs = 2;
914   } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
915     Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
916     BeginIdx = ARM::qsub_0;
917     SubRegs = 4;
918   // Fall back to VMOVD.
919   } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
920     Opc = ARM::VMOVD;
921     BeginIdx = ARM::dsub_0;
922     SubRegs = 2;
923   } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
924     Opc = ARM::VMOVD;
925     BeginIdx = ARM::dsub_0;
926     SubRegs = 3;
927   } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
928     Opc = ARM::VMOVD;
929     BeginIdx = ARM::dsub_0;
930     SubRegs = 4;
931   } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
932     Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
933     BeginIdx = ARM::gsub_0;
934     SubRegs = 2;
935   } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
936     Opc = ARM::VMOVD;
937     BeginIdx = ARM::dsub_0;
938     SubRegs = 2;
939     Spacing = 2;
940   } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
941     Opc = ARM::VMOVD;
942     BeginIdx = ARM::dsub_0;
943     SubRegs = 3;
944     Spacing = 2;
945   } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
946     Opc = ARM::VMOVD;
947     BeginIdx = ARM::dsub_0;
948     SubRegs = 4;
949     Spacing = 2;
950   } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) &&
951              !Subtarget.hasFP64()) {
952     Opc = ARM::VMOVS;
953     BeginIdx = ARM::ssub_0;
954     SubRegs = 2;
955   } else if (SrcReg == ARM::CPSR) {
956     copyFromCPSR(MBB, I, DestReg, KillSrc, Subtarget);
957     return;
958   } else if (DestReg == ARM::CPSR) {
959     copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget);
960     return;
961   } else if (DestReg == ARM::VPR) {
962     assert(ARM::GPRRegClass.contains(SrcReg));
963     BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_P0), DestReg)
964         .addReg(SrcReg, getKillRegState(KillSrc))
965         .add(predOps(ARMCC::AL));
966     return;
967   } else if (SrcReg == ARM::VPR) {
968     assert(ARM::GPRRegClass.contains(DestReg));
969     BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_P0), DestReg)
970         .addReg(SrcReg, getKillRegState(KillSrc))
971         .add(predOps(ARMCC::AL));
972     return;
973   } else if (DestReg == ARM::FPSCR_NZCV) {
974     assert(ARM::GPRRegClass.contains(SrcReg));
975     BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
976         .addReg(SrcReg, getKillRegState(KillSrc))
977         .add(predOps(ARMCC::AL));
978     return;
979   } else if (SrcReg == ARM::FPSCR_NZCV) {
980     assert(ARM::GPRRegClass.contains(DestReg));
981     BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
982         .addReg(SrcReg, getKillRegState(KillSrc))
983         .add(predOps(ARMCC::AL));
984     return;
985   }
986 
987   assert(Opc && "Impossible reg-to-reg copy");
988 
989   const TargetRegisterInfo *TRI = &getRegisterInfo();
990   MachineInstrBuilder Mov;
991 
992   // Copy register tuples backward when the first Dest reg overlaps with SrcReg.
993   if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
994     BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
995     Spacing = -Spacing;
996   }
997 #ifndef NDEBUG
998   SmallSet<unsigned, 4> DstRegs;
999 #endif
1000   for (unsigned i = 0; i != SubRegs; ++i) {
1001     Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
1002     Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
1003     assert(Dst && Src && "Bad sub-register");
1004 #ifndef NDEBUG
1005     assert(!DstRegs.count(Src) && "destructive vector copy");
1006     DstRegs.insert(Dst);
1007 #endif
1008     Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
1009     // VORR (NEON or MVE) takes two source operands.
1010     if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
1011       Mov.addReg(Src);
1012     }
1013     // MVE VORR takes predicate operands in place of an ordinary condition.
1014     if (Opc == ARM::MVE_VORR)
1015       addUnpredicatedMveVpredROp(Mov, Dst);
1016     else
1017       Mov = Mov.add(predOps(ARMCC::AL));
1018     // MOVr can set CC.
1019     if (Opc == ARM::MOVr)
1020       Mov = Mov.add(condCodeOp());
1021   }
1022   // Add implicit super-register defs and kills to the last instruction.
1023   Mov->addRegisterDefined(DestReg, TRI);
1024   if (KillSrc)
1025     Mov->addRegisterKilled(SrcReg, TRI);
1026 }
1027 
1028 Optional<DestSourcePair>
1029 ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
1030   // VMOVRRD is also a copy instruction, but it requires a
1031   // special way of handling: it is a more complex form of copy,
1032   // so we do not consider it here. To recognize such
1033   // instructions, the isExtractSubregLike MI interface function
1034   // could be used.
1035   // VORRq is considered a move only if its two inputs are
1036   // the same register.
1037   if (!MI.isMoveReg() ||
1038       (MI.getOpcode() == ARM::VORRq &&
1039        MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
1040     return None;
1041   return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1042 }
1043 
1044 Optional<ParamLoadedValue>
1045 ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
1046                                       Register Reg) const {
1047   if (auto DstSrcPair = isCopyInstrImpl(MI)) {
1048     Register DstReg = DstSrcPair->Destination->getReg();
1049 
1050     // TODO: We don't handle cases where the forwarding reg is narrower/wider
1051     // than the copy registers. Consider for example:
1052     //
1053     //   s16 = VMOVS s0
1054     //   s17 = VMOVS s1
1055     //   call @callee(d0)
1056     //
1057     // We'd like to describe the call site value of d0 as d8, but this requires
1058     // gathering and merging the descriptions for the two VMOVS instructions.
1059     //
1060     // We also don't handle the reverse situation, where the forwarding reg is
1061     // narrower than the copy destination:
1062     //
1063     //   d8 = VMOVD d0
1064     //   call @callee(s1)
1065     //
1066     // We need to produce a fragment description (the call site value of s1 is
1067     // /not/ just d8).
1068     if (DstReg != Reg)
1069       return None;
1070   }
1071   return TargetInstrInfo::describeLoadedValue(MI, Reg);
1072 }
1073 
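/// AddDReg - Add a register operand for Reg (or, for a physical register, its
/// SubIdx sub-register) to MIB with the given register state flags; virtual
/// registers keep the sub-register index on the operand instead.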
1074 const MachineInstrBuilder &
1075 ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
1076                           unsigned SubIdx, unsigned State,
1077                           const TargetRegisterInfo *TRI) const {
1078   if (!SubIdx)
1079     return MIB.addReg(Reg, State);
1080 
1081   if (Register::isPhysicalRegister(Reg))
1082     return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
1083   return MIB.addReg(Reg, State, SubIdx);
1084 }
1085 
1086 void ARMBaseInstrInfo::
1087 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1088                     Register SrcReg, bool isKill, int FI,
1089                     const TargetRegisterClass *RC,
1090                     const TargetRegisterInfo *TRI) const {
1091   MachineFunction &MF = *MBB.getParent();
1092   MachineFrameInfo &MFI = MF.getFrameInfo();
1093   Align Alignment = MFI.getObjectAlign(FI);
1094 
1095   MachineMemOperand *MMO = MF.getMachineMemOperand(
1096       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
1097       MFI.getObjectSize(FI), Alignment);
1098 
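  // Choose a store opcode based on the spill size of the register class; larger
  // tuples fall back to multi-register stores (STM/VSTM) or spill pseudos.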
1099   switch (TRI->getSpillSize(*RC)) {
1100     case 2:
1101       if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1102         BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
1103             .addReg(SrcReg, getKillRegState(isKill))
1104             .addFrameIndex(FI)
1105             .addImm(0)
1106             .addMemOperand(MMO)
1107             .add(predOps(ARMCC::AL));
1108       } else
1109         llvm_unreachable("Unknown reg class!");
1110       break;
1111     case 4:
1112       if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1113         BuildMI(MBB, I, DebugLoc(), get(ARM::STRi12))
1114             .addReg(SrcReg, getKillRegState(isKill))
1115             .addFrameIndex(FI)
1116             .addImm(0)
1117             .addMemOperand(MMO)
1118             .add(predOps(ARMCC::AL));
1119       } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1120         BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRS))
1121             .addReg(SrcReg, getKillRegState(isKill))
1122             .addFrameIndex(FI)
1123             .addImm(0)
1124             .addMemOperand(MMO)
1125             .add(predOps(ARMCC::AL));
1126       } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1127         BuildMI(MBB, I, DebugLoc(), get(ARM::VSTR_P0_off))
1128             .addReg(SrcReg, getKillRegState(isKill))
1129             .addFrameIndex(FI)
1130             .addImm(0)
1131             .addMemOperand(MMO)
1132             .add(predOps(ARMCC::AL));
1133       } else
1134         llvm_unreachable("Unknown reg class!");
1135       break;
1136     case 8:
1137       if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1138         BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRD))
1139             .addReg(SrcReg, getKillRegState(isKill))
1140             .addFrameIndex(FI)
1141             .addImm(0)
1142             .addMemOperand(MMO)
1143             .add(predOps(ARMCC::AL));
1144       } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1145         if (Subtarget.hasV5TEOps()) {
1146           MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
1147           AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
1148           AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
1149           MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
1150              .add(predOps(ARMCC::AL));
1151         } else {
1152           // Fallback to STM instruction, which has existed since the dawn of
1153           // time.
1154           MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STMIA))
1155                                         .addFrameIndex(FI)
1156                                         .addMemOperand(MMO)
1157                                         .add(predOps(ARMCC::AL));
1158           AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
1159           AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
1160         }
1161       } else
1162         llvm_unreachable("Unknown reg class!");
1163       break;
1164     case 16:
1165       if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1166         // Use aligned spills if the stack can be realigned.
1167         if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
1168           BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
1169               .addFrameIndex(FI)
1170               .addImm(16)
1171               .addReg(SrcReg, getKillRegState(isKill))
1172               .addMemOperand(MMO)
1173               .add(predOps(ARMCC::AL));
1174         } else {
1175           BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMQIA))
1176               .addReg(SrcReg, getKillRegState(isKill))
1177               .addFrameIndex(FI)
1178               .addMemOperand(MMO)
1179               .add(predOps(ARMCC::AL));
1180         }
1181       } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1182                  Subtarget.hasMVEIntegerOps()) {
1183         auto MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::MVE_VSTRWU32));
1184         MIB.addReg(SrcReg, getKillRegState(isKill))
1185           .addFrameIndex(FI)
1186           .addImm(0)
1187           .addMemOperand(MMO);
1188         addUnpredicatedMveVpredNOp(MIB);
1189       } else
1190         llvm_unreachable("Unknown reg class!");
1191       break;
1192     case 24:
1193       if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1194         // Use aligned spills if the stack can be realigned.
1195         if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1196             Subtarget.hasNEON()) {
1197           BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
1198               .addFrameIndex(FI)
1199               .addImm(16)
1200               .addReg(SrcReg, getKillRegState(isKill))
1201               .addMemOperand(MMO)
1202               .add(predOps(ARMCC::AL));
1203         } else {
1204           MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
1205                                             get(ARM::VSTMDIA))
1206                                         .addFrameIndex(FI)
1207                                         .add(predOps(ARMCC::AL))
1208                                         .addMemOperand(MMO);
1209           MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1210           MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1211           AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1212         }
1213       } else
1214         llvm_unreachable("Unknown reg class!");
1215       break;
1216     case 32:
1217       if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1218           ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1219           ARM::DQuadRegClass.hasSubClassEq(RC)) {
1220         if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1221             Subtarget.hasNEON()) {
1222           // FIXME: It's possible to only store part of the QQ register if the
1223           // spilled def has a sub-register index.
1224           BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64QPseudo))
1225               .addFrameIndex(FI)
1226               .addImm(16)
1227               .addReg(SrcReg, getKillRegState(isKill))
1228               .addMemOperand(MMO)
1229               .add(predOps(ARMCC::AL));
1230         } else if (Subtarget.hasMVEIntegerOps()) {
1231           BuildMI(MBB, I, DebugLoc(), get(ARM::MQQPRStore))
1232               .addReg(SrcReg, getKillRegState(isKill))
1233               .addFrameIndex(FI)
1234               .addMemOperand(MMO);
1235         } else {
1236           MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
1237                                             get(ARM::VSTMDIA))
1238                                         .addFrameIndex(FI)
1239                                         .add(predOps(ARMCC::AL))
1240                                         .addMemOperand(MMO);
1241           MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1242           MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1243           MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1244                 AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
1245         }
1246       } else
1247         llvm_unreachable("Unknown reg class!");
1248       break;
1249     case 64:
1250       if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1251           Subtarget.hasMVEIntegerOps()) {
1252         BuildMI(MBB, I, DebugLoc(), get(ARM::MQQQQPRStore))
1253             .addReg(SrcReg, getKillRegState(isKill))
1254             .addFrameIndex(FI)
1255             .addMemOperand(MMO);
1256       } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1257         MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMDIA))
1258                                       .addFrameIndex(FI)
1259                                       .add(predOps(ARMCC::AL))
1260                                       .addMemOperand(MMO);
1261         MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1262         MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1263         MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1264         MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
1265         MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
1266         MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
1267         MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
1268               AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
1269       } else
1270         llvm_unreachable("Unknown reg class!");
1271       break;
1272     default:
1273       llvm_unreachable("Unknown reg class!");
1274   }
1275 }
1276 
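// If the instruction is a direct store to a stack slot (frame-index base, zero
// offset, no index register), return the stored register and set FrameIndex.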
1277 unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
1278                                               int &FrameIndex) const {
1279   switch (MI.getOpcode()) {
1280   default: break;
1281   case ARM::STRrs:
1282   case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
1283     if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
1284         MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
1285         MI.getOperand(3).getImm() == 0) {
1286       FrameIndex = MI.getOperand(1).getIndex();
1287       return MI.getOperand(0).getReg();
1288     }
1289     break;
1290   case ARM::STRi12:
1291   case ARM::t2STRi12:
1292   case ARM::tSTRspi:
1293   case ARM::VSTRD:
1294   case ARM::VSTRS:
1295   case ARM::VSTR_P0_off:
1296   case ARM::MVE_VSTRWU32:
1297     if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
1298         MI.getOperand(2).getImm() == 0) {
1299       FrameIndex = MI.getOperand(1).getIndex();
1300       return MI.getOperand(0).getReg();
1301     }
1302     break;
1303   case ARM::VST1q64:
1304   case ARM::VST1d64TPseudo:
1305   case ARM::VST1d64QPseudo:
1306     if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
1307       FrameIndex = MI.getOperand(0).getIndex();
1308       return MI.getOperand(2).getReg();
1309     }
1310     break;
1311   case ARM::VSTMQIA:
1312     if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1313       FrameIndex = MI.getOperand(1).getIndex();
1314       return MI.getOperand(0).getReg();
1315     }
1316     break;
1317   case ARM::MQQPRStore:
1318   case ARM::MQQQQPRStore:
1319     if (MI.getOperand(1).isFI()) {
1320       FrameIndex = MI.getOperand(1).getIndex();
1321       return MI.getOperand(0).getReg();
1322     }
1323     break;
1324   }
1325 
1326   return 0;
1327 }
1328 
1329 unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
1330                                                     int &FrameIndex) const {
1331   SmallVector<const MachineMemOperand *, 1> Accesses;
1332   if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
1333       Accesses.size() == 1) {
1334     FrameIndex =
1335         cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
1336             ->getFrameIndex();
1337     return true;
1338   }
1339   return false;
1340 }
1341 
1342 void ARMBaseInstrInfo::
1343 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1344                      Register DestReg, int FI,
1345                      const TargetRegisterClass *RC,
1346                      const TargetRegisterInfo *TRI) const {
1347   DebugLoc DL;
1348   if (I != MBB.end()) DL = I->getDebugLoc();
1349   MachineFunction &MF = *MBB.getParent();
1350   MachineFrameInfo &MFI = MF.getFrameInfo();
1351   const Align Alignment = MFI.getObjectAlign(FI);
1352   MachineMemOperand *MMO = MF.getMachineMemOperand(
1353       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
1354       MFI.getObjectSize(FI), Alignment);
1355 
1356   switch (TRI->getSpillSize(*RC)) {
1357   case 2:
1358     if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1359       BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
1360           .addFrameIndex(FI)
1361           .addImm(0)
1362           .addMemOperand(MMO)
1363           .add(predOps(ARMCC::AL));
1364     } else
1365       llvm_unreachable("Unknown reg class!");
1366     break;
1367   case 4:
1368     if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1369       BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
1370           .addFrameIndex(FI)
1371           .addImm(0)
1372           .addMemOperand(MMO)
1373           .add(predOps(ARMCC::AL));
1374     } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1375       BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
1376           .addFrameIndex(FI)
1377           .addImm(0)
1378           .addMemOperand(MMO)
1379           .add(predOps(ARMCC::AL));
1380     } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1381       BuildMI(MBB, I, DL, get(ARM::VLDR_P0_off), DestReg)
1382           .addFrameIndex(FI)
1383           .addImm(0)
1384           .addMemOperand(MMO)
1385           .add(predOps(ARMCC::AL));
1386     } else
1387       llvm_unreachable("Unknown reg class!");
1388     break;
1389   case 8:
1390     if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1391       BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
1392           .addFrameIndex(FI)
1393           .addImm(0)
1394           .addMemOperand(MMO)
1395           .add(predOps(ARMCC::AL));
1396     } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1397       MachineInstrBuilder MIB;
1398 
1399       if (Subtarget.hasV5TEOps()) {
1400         MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
1401         AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
1402         AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
1403         MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
1404            .add(predOps(ARMCC::AL));
1405       } else {
1406         // Fall back to the LDM instruction, which has existed since the
1407         // dawn of time.
1408         MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA))
1409                   .addFrameIndex(FI)
1410                   .addMemOperand(MMO)
1411                   .add(predOps(ARMCC::AL));
1412         MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
1413         MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
1414       }
1415 
1416       if (Register::isPhysicalRegister(DestReg))
1417         MIB.addReg(DestReg, RegState::ImplicitDefine);
1418     } else
1419       llvm_unreachable("Unknown reg class!");
1420     break;
1421   case 16:
1422     if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1423       if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
1424         BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
1425             .addFrameIndex(FI)
1426             .addImm(16)
1427             .addMemOperand(MMO)
1428             .add(predOps(ARMCC::AL));
1429       } else {
1430         BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
1431             .addFrameIndex(FI)
1432             .addMemOperand(MMO)
1433             .add(predOps(ARMCC::AL));
1434       }
1435     } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1436                Subtarget.hasMVEIntegerOps()) {
1437       auto MIB = BuildMI(MBB, I, DL, get(ARM::MVE_VLDRWU32), DestReg);
1438       MIB.addFrameIndex(FI)
1439         .addImm(0)
1440         .addMemOperand(MMO);
1441       addUnpredicatedMveVpredNOp(MIB);
1442     } else
1443       llvm_unreachable("Unknown reg class!");
1444     break;
1445   case 24:
1446     if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1447       if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1448           Subtarget.hasNEON()) {
1449         BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
1450             .addFrameIndex(FI)
1451             .addImm(16)
1452             .addMemOperand(MMO)
1453             .add(predOps(ARMCC::AL));
1454       } else {
1455         MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1456                                       .addFrameIndex(FI)
1457                                       .addMemOperand(MMO)
1458                                       .add(predOps(ARMCC::AL));
1459         MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1460         MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1461         MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1462         if (Register::isPhysicalRegister(DestReg))
1463           MIB.addReg(DestReg, RegState::ImplicitDefine);
1464       }
1465     } else
1466       llvm_unreachable("Unknown reg class!");
1467     break;
1468    case 32:
1469      if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1470          ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1471          ARM::DQuadRegClass.hasSubClassEq(RC)) {
1472        if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1473            Subtarget.hasNEON()) {
1474          BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
1475              .addFrameIndex(FI)
1476              .addImm(16)
1477              .addMemOperand(MMO)
1478              .add(predOps(ARMCC::AL));
1479        } else if (Subtarget.hasMVEIntegerOps()) {
1480          BuildMI(MBB, I, DL, get(ARM::MQQPRLoad), DestReg)
1481              .addFrameIndex(FI)
1482              .addMemOperand(MMO);
1483        } else {
1484          MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1485                                        .addFrameIndex(FI)
1486                                        .add(predOps(ARMCC::AL))
1487                                        .addMemOperand(MMO);
1488          MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1489          MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1490          MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1491          MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
1492          if (Register::isPhysicalRegister(DestReg))
1493            MIB.addReg(DestReg, RegState::ImplicitDefine);
1494        }
1495      } else
1496        llvm_unreachable("Unknown reg class!");
1497      break;
1498   case 64:
1499     if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1500         Subtarget.hasMVEIntegerOps()) {
1501       BuildMI(MBB, I, DL, get(ARM::MQQQQPRLoad), DestReg)
1502           .addFrameIndex(FI)
1503           .addMemOperand(MMO);
1504     } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1505       MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1506                                     .addFrameIndex(FI)
1507                                     .add(predOps(ARMCC::AL))
1508                                     .addMemOperand(MMO);
1509       MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1510       MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1511       MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1512       MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
1513       MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
1514       MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
1515       MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
1516       MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
1517       if (Register::isPhysicalRegister(DestReg))
1518         MIB.addReg(DestReg, RegState::ImplicitDefine);
1519     } else
1520       llvm_unreachable("Unknown reg class!");
1521     break;
1522   default:
1523     llvm_unreachable("Unknown regclass!");
1524   }
1525 }
1526 
1527 unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
1528                                                int &FrameIndex) const {
1529   switch (MI.getOpcode()) {
1530   default: break;
1531   case ARM::LDRrs:
1532   case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
1533     if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
1534         MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
1535         MI.getOperand(3).getImm() == 0) {
1536       FrameIndex = MI.getOperand(1).getIndex();
1537       return MI.getOperand(0).getReg();
1538     }
1539     break;
1540   case ARM::LDRi12:
1541   case ARM::t2LDRi12:
1542   case ARM::tLDRspi:
1543   case ARM::VLDRD:
1544   case ARM::VLDRS:
1545   case ARM::VLDR_P0_off:
1546   case ARM::MVE_VLDRWU32:
1547     if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
1548         MI.getOperand(2).getImm() == 0) {
1549       FrameIndex = MI.getOperand(1).getIndex();
1550       return MI.getOperand(0).getReg();
1551     }
1552     break;
1553   case ARM::VLD1q64:
1554   case ARM::VLD1d8TPseudo:
1555   case ARM::VLD1d16TPseudo:
1556   case ARM::VLD1d32TPseudo:
1557   case ARM::VLD1d64TPseudo:
1558   case ARM::VLD1d8QPseudo:
1559   case ARM::VLD1d16QPseudo:
1560   case ARM::VLD1d32QPseudo:
1561   case ARM::VLD1d64QPseudo:
1562     if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1563       FrameIndex = MI.getOperand(1).getIndex();
1564       return MI.getOperand(0).getReg();
1565     }
1566     break;
1567   case ARM::VLDMQIA:
1568     if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1569       FrameIndex = MI.getOperand(1).getIndex();
1570       return MI.getOperand(0).getReg();
1571     }
1572     break;
1573   case ARM::MQQPRLoad:
1574   case ARM::MQQQQPRLoad:
1575     if (MI.getOperand(1).isFI()) {
1576       FrameIndex = MI.getOperand(1).getIndex();
1577       return MI.getOperand(0).getReg();
1578     }
1579     break;
1580   }
1581 
1582   return 0;
1583 }
1584 
1585 unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
1586                                                      int &FrameIndex) const {
1587   SmallVector<const MachineMemOperand *, 1> Accesses;
1588   if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
1589       Accesses.size() == 1) {
1590     FrameIndex =
1591         cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
1592             ->getFrameIndex();
1593     return true;
1594   }
1595   return false;
1596 }
1597 
1598 /// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
1599 /// depending on whether the result is used.
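// Illustrative sketch of the expansion (an assumption based on the operand
// uses below, not a verbatim listing): with the destination/source writeback
// results in operands 0/1, the destination/source bases in operands 2/3 and
// scratch registers from operand 5 onwards, a live-writeback MEMCPY becomes
// roughly
//   LDMIA_UPD <src_wb>, <src_base>, pred:AL, def <scratch regs...>
//   STMIA_UPD <dst_wb>, <dst_base>, pred:AL, killed <scratch regs...>
// with the scratch registers sorted by encoding so both register lists match.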
1600 void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
1601   bool isThumb1 = Subtarget.isThumb1Only();
1602   bool isThumb2 = Subtarget.isThumb2();
1603   const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();
1604 
1605   DebugLoc dl = MI->getDebugLoc();
1606   MachineBasicBlock *BB = MI->getParent();
1607 
1608   MachineInstrBuilder LDM, STM;
1609   if (isThumb1 || !MI->getOperand(1).isDead()) {
1610     MachineOperand LDWb(MI->getOperand(1));
1611     LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1612                                                  : isThumb1 ? ARM::tLDMIA_UPD
1613                                                             : ARM::LDMIA_UPD))
1614               .add(LDWb);
1615   } else {
1616     LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1617   }
1618 
1619   if (isThumb1 || !MI->getOperand(0).isDead()) {
1620     MachineOperand STWb(MI->getOperand(0));
1621     STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
1622                                                  : isThumb1 ? ARM::tSTMIA_UPD
1623                                                             : ARM::STMIA_UPD))
1624               .add(STWb);
1625   } else {
1626     STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1627   }
1628 
1629   MachineOperand LDBase(MI->getOperand(3));
1630   LDM.add(LDBase).add(predOps(ARMCC::AL));
1631 
1632   MachineOperand STBase(MI->getOperand(2));
1633   STM.add(STBase).add(predOps(ARMCC::AL));
1634 
1635   // Sort the scratch registers into ascending order.
1636   const TargetRegisterInfo &TRI = getRegisterInfo();
1637   SmallVector<unsigned, 6> ScratchRegs;
1638   for(unsigned I = 5; I < MI->getNumOperands(); ++I)
1639     ScratchRegs.push_back(MI->getOperand(I).getReg());
1640   llvm::sort(ScratchRegs,
1641              [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
1642                return TRI.getEncodingValue(Reg1) <
1643                       TRI.getEncodingValue(Reg2);
1644              });
1645 
1646   for (const auto &Reg : ScratchRegs) {
1647     LDM.addReg(Reg, RegState::Define);
1648     STM.addReg(Reg, RegState::Kill);
1649   }
1650 
1651   BB->erase(MI);
1652 }
1653 
1654 bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1655   if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1656     expandLoadStackGuard(MI);
1657     MI.getParent()->erase(MI);
1658     return true;
1659   }
1660 
1661   if (MI.getOpcode() == ARM::MEMCPY) {
1662     expandMEMCPY(MI);
1663     return true;
1664   }
1665 
1666   // This hook gets to expand COPY instructions before they become
1667   // copyPhysReg() calls.  Look for VMOVS instructions that can legally be
1668   // widened to VMOVD.  We prefer the VMOVD when possible because it may be
1669   // changed into a VORR that can go down the NEON pipeline.
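  // Illustrative example (an assumption, not from the original comment): an
  // "$s0 = COPY $s2" that already implicit-defines the whole $d0 is rewritten
  // below into "$d0 = VMOVD undef $d1, 14, $noreg, implicit $s2", i.e. the
  // full D-register is moved even though only its ssub_0 lane holds a defined
  // value.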
1670   if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1671     return false;
1672 
1673   // Look for a copy between even S-registers.  That is where we keep floats
1674   // when using NEON v2f32 instructions for f32 arithmetic.
1675   Register DstRegS = MI.getOperand(0).getReg();
1676   Register SrcRegS = MI.getOperand(1).getReg();
1677   if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
1678     return false;
1679 
1680   const TargetRegisterInfo *TRI = &getRegisterInfo();
1681   unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
1682                                               &ARM::DPRRegClass);
1683   unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
1684                                               &ARM::DPRRegClass);
1685   if (!DstRegD || !SrcRegD)
1686     return false;
1687 
1688   // We want to widen this into a DstRegD = VMOVD SrcRegD copy.  This is only
1689   // legal if the COPY already defines the full DstRegD, and it isn't a
1690   // sub-register insertion.
1691   if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))
1692     return false;
1693 
1694   // A dead copy shouldn't show up here, but reject it just in case.
1695   if (MI.getOperand(0).isDead())
1696     return false;
1697 
1698   // All clear, widen the COPY.
1699   LLVM_DEBUG(dbgs() << "widening:    " << MI);
1700   MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
1701 
1702   // Get rid of the old implicit-def of DstRegD.  Leave it if it defines a Q-reg
1703   // or some other super-register.
1704   int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
1705   if (ImpDefIdx != -1)
1706     MI.RemoveOperand(ImpDefIdx);
1707 
1708   // Change the opcode and operands.
1709   MI.setDesc(get(ARM::VMOVD));
1710   MI.getOperand(0).setReg(DstRegD);
1711   MI.getOperand(1).setReg(SrcRegD);
1712   MIB.add(predOps(ARMCC::AL));
1713 
1714   // We are now reading SrcRegD instead of SrcRegS.  This may upset the
1715   // register scavenger and machine verifier, so we need to indicate that we
1716   // are reading an undefined value from SrcRegD, but a proper value from
1717   // SrcRegS.
1718   MI.getOperand(1).setIsUndef();
1719   MIB.addReg(SrcRegS, RegState::Implicit);
1720 
1721   // SrcRegD may actually contain an unrelated value in the ssub_1
1722   // sub-register.  Don't kill it.  Only kill the ssub_0 sub-register.
1723   if (MI.getOperand(1).isKill()) {
1724     MI.getOperand(1).setIsKill(false);
1725     MI.addRegisterKilled(SrcRegS, TRI, true);
1726   }
1727 
1728   LLVM_DEBUG(dbgs() << "replaced by: " << MI);
1729   return true;
1730 }
1731 
1732 /// Create a copy of a const pool value. Update CPI to the new index and return
1733 /// the label UID.
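// Illustrative example (not from the original comment): when a t2LDRpci_pic
// that loads a global through a PIC constant-pool entry is re-materialized or
// duplicated, the clone gets a fresh PIC label UID and a new constant-pool
// slot referring to it, since each PC-relative sequence must own its label.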
1734 static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
1735   MachineConstantPool *MCP = MF.getConstantPool();
1736   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1737 
1738   const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
1739   assert(MCPE.isMachineConstantPoolEntry() &&
1740          "Expecting a machine constantpool entry!");
1741   ARMConstantPoolValue *ACPV =
1742     static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
1743 
1744   unsigned PCLabelId = AFI->createPICLabelUId();
1745   ARMConstantPoolValue *NewCPV = nullptr;
1746 
1747   // FIXME: The below assumes PIC relocation model and that the function
1748   // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
1749   // zero for non-PIC in ARM or Thumb. The callers all handle Thumb LDR
1750   // instructions, so that's probably OK, but is PIC always correct when
1751   // we get here?
1752   if (ACPV->isGlobalValue())
1753     NewCPV = ARMConstantPoolConstant::Create(
1754         cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, ARMCP::CPValue,
1755         4, ACPV->getModifier(), ACPV->mustAddCurrentAddress());
1756   else if (ACPV->isExtSymbol())
1757     NewCPV = ARMConstantPoolSymbol::
1758       Create(MF.getFunction().getContext(),
1759              cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1760   else if (ACPV->isBlockAddress())
1761     NewCPV = ARMConstantPoolConstant::
1762       Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1763              ARMCP::CPBlockAddress, 4);
1764   else if (ACPV->isLSDA())
1765     NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId,
1766                                              ARMCP::CPLSDA, 4);
1767   else if (ACPV->isMachineBasicBlock())
1768     NewCPV = ARMConstantPoolMBB::
1769       Create(MF.getFunction().getContext(),
1770              cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1771   else
1772     llvm_unreachable("Unexpected ARM constantpool value type!!");
1773   CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlign());
1774   return PCLabelId;
1775 }
1776 
1777 void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
1778                                      MachineBasicBlock::iterator I,
1779                                      Register DestReg, unsigned SubIdx,
1780                                      const MachineInstr &Orig,
1781                                      const TargetRegisterInfo &TRI) const {
1782   unsigned Opcode = Orig.getOpcode();
1783   switch (Opcode) {
1784   default: {
1785     MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
1786     MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
1787     MBB.insert(I, MI);
1788     break;
1789   }
1790   case ARM::tLDRpci_pic:
1791   case ARM::t2LDRpci_pic: {
1792     MachineFunction &MF = *MBB.getParent();
1793     unsigned CPI = Orig.getOperand(1).getIndex();
1794     unsigned PCLabelId = duplicateCPV(MF, CPI);
1795     BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
1796         .addConstantPoolIndex(CPI)
1797         .addImm(PCLabelId)
1798         .cloneMemRefs(Orig);
1799     break;
1800   }
1801   }
1802 }
1803 
1804 MachineInstr &
1805 ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB,
1806     MachineBasicBlock::iterator InsertBefore,
1807     const MachineInstr &Orig) const {
1808   MachineInstr &Cloned = TargetInstrInfo::duplicate(MBB, InsertBefore, Orig);
1809   MachineBasicBlock::instr_iterator I = Cloned.getIterator();
1810   for (;;) {
1811     switch (I->getOpcode()) {
1812     case ARM::tLDRpci_pic:
1813     case ARM::t2LDRpci_pic: {
1814       MachineFunction &MF = *MBB.getParent();
1815       unsigned CPI = I->getOperand(1).getIndex();
1816       unsigned PCLabelId = duplicateCPV(MF, CPI);
1817       I->getOperand(1).setIndex(CPI);
1818       I->getOperand(2).setImm(PCLabelId);
1819       break;
1820     }
1821     }
1822     if (!I->isBundledWithSucc())
1823       break;
1824     ++I;
1825   }
1826   return Cloned;
1827 }
1828 
1829 bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
1830                                         const MachineInstr &MI1,
1831                                         const MachineRegisterInfo *MRI) const {
1832   unsigned Opcode = MI0.getOpcode();
1833   if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1834       Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1835       Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1836       Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1837       Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1838       Opcode == ARM::t2MOV_ga_pcrel) {
1839     if (MI1.getOpcode() != Opcode)
1840       return false;
1841     if (MI0.getNumOperands() != MI1.getNumOperands())
1842       return false;
1843 
1844     const MachineOperand &MO0 = MI0.getOperand(1);
1845     const MachineOperand &MO1 = MI1.getOperand(1);
1846     if (MO0.getOffset() != MO1.getOffset())
1847       return false;
1848 
1849     if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1850         Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1851         Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1852         Opcode == ARM::t2MOV_ga_pcrel)
1853       // Ignore the PC labels.
1854       return MO0.getGlobal() == MO1.getGlobal();
1855 
1856     const MachineFunction *MF = MI0.getParent()->getParent();
1857     const MachineConstantPool *MCP = MF->getConstantPool();
1858     int CPI0 = MO0.getIndex();
1859     int CPI1 = MO1.getIndex();
1860     const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
1861     const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
1862     bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
1863     bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
1864     if (isARMCP0 && isARMCP1) {
1865       ARMConstantPoolValue *ACPV0 =
1866         static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
1867       ARMConstantPoolValue *ACPV1 =
1868         static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
1869       return ACPV0->hasSameValue(ACPV1);
1870     } else if (!isARMCP0 && !isARMCP1) {
1871       return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
1872     }
1873     return false;
1874   } else if (Opcode == ARM::PICLDR) {
1875     if (MI1.getOpcode() != Opcode)
1876       return false;
1877     if (MI0.getNumOperands() != MI1.getNumOperands())
1878       return false;
1879 
1880     Register Addr0 = MI0.getOperand(1).getReg();
1881     Register Addr1 = MI1.getOperand(1).getReg();
1882     if (Addr0 != Addr1) {
1883       if (!MRI || !Register::isVirtualRegister(Addr0) ||
1884           !Register::isVirtualRegister(Addr1))
1885         return false;
1886 
1887       // This assumes SSA form.
1888       MachineInstr *Def0 = MRI->getVRegDef(Addr0);
1889       MachineInstr *Def1 = MRI->getVRegDef(Addr1);
1890       // Check if the loaded values, e.g. a constant pool entry holding a
1891       // global address, are the same.
1892       if (!produceSameValue(*Def0, *Def1, MRI))
1893         return false;
1894     }
1895 
1896     for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
1897       // %12 = PICLDR %11, 0, 14, %noreg
1898       const MachineOperand &MO0 = MI0.getOperand(i);
1899       const MachineOperand &MO1 = MI1.getOperand(i);
1900       if (!MO0.isIdenticalTo(MO1))
1901         return false;
1902     }
1903     return true;
1904   }
1905 
1906   return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
1907 }
1908 
1909 /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
1910 /// determine if two loads are loading from the same base address. It should
1911 /// only return true if the base pointers are the same and the only difference
1912 /// between the two addresses is the offset. It also returns the offsets by
1913 /// reference.
1914 ///
1915 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
1916 /// is permanently disabled.
1917 bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1918                                                int64_t &Offset1,
1919                                                int64_t &Offset2) const {
1920   // Don't worry about Thumb: just ARM and Thumb2.
1921   if (Subtarget.isThumb1Only()) return false;
1922 
1923   if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
1924     return false;
1925 
1926   switch (Load1->getMachineOpcode()) {
1927   default:
1928     return false;
1929   case ARM::LDRi12:
1930   case ARM::LDRBi12:
1931   case ARM::LDRD:
1932   case ARM::LDRH:
1933   case ARM::LDRSB:
1934   case ARM::LDRSH:
1935   case ARM::VLDRD:
1936   case ARM::VLDRS:
1937   case ARM::t2LDRi8:
1938   case ARM::t2LDRBi8:
1939   case ARM::t2LDRDi8:
1940   case ARM::t2LDRSHi8:
1941   case ARM::t2LDRi12:
1942   case ARM::t2LDRBi12:
1943   case ARM::t2LDRSHi12:
1944     break;
1945   }
1946 
1947   switch (Load2->getMachineOpcode()) {
1948   default:
1949     return false;
1950   case ARM::LDRi12:
1951   case ARM::LDRBi12:
1952   case ARM::LDRD:
1953   case ARM::LDRH:
1954   case ARM::LDRSB:
1955   case ARM::LDRSH:
1956   case ARM::VLDRD:
1957   case ARM::VLDRS:
1958   case ARM::t2LDRi8:
1959   case ARM::t2LDRBi8:
1960   case ARM::t2LDRSHi8:
1961   case ARM::t2LDRi12:
1962   case ARM::t2LDRBi12:
1963   case ARM::t2LDRSHi12:
1964     break;
1965   }
1966 
1967   // Check if base addresses and chain operands match.
1968   if (Load1->getOperand(0) != Load2->getOperand(0) ||
1969       Load1->getOperand(4) != Load2->getOperand(4))
1970     return false;
1971 
1972   // Index should be Reg0.
1973   if (Load1->getOperand(3) != Load2->getOperand(3))
1974     return false;
1975 
1976   // Determine the offsets.
1977   if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
1978       isa<ConstantSDNode>(Load2->getOperand(1))) {
1979     Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
1980     Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
1981     return true;
1982   }
1983 
1984   return false;
1985 }
1986 
1987 /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
1988 /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
1989 /// be scheduled together. On some targets, if two loads are loading from
1990 /// addresses in the same cache line, it's better if they are scheduled
1991 /// together. This function takes two integers that represent the load offsets
1992 /// from the common base address. It returns true if it decides it's desirable
1993 /// to schedule the two loads together. "NumLoads" is the number of loads that
1994 /// have already been scheduled after Load1.
1995 ///
1996 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
1997 /// is permanently disabled.
1998 bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1999                                                int64_t Offset1, int64_t Offset2,
2000                                                unsigned NumLoads) const {
2001   // Don't worry about Thumb: just ARM and Thumb2.
2002   if (Subtarget.isThumb1Only()) return false;
2003 
2004   assert(Offset2 > Offset1);
2005 
2006   if ((Offset2 - Offset1) / 8 > 64)
2007     return false;
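  // For instance (illustrative): Offset1 = 0 and Offset2 = 512 gives
  // 512 / 8 = 64, which passes this distance check, whereas a gap of 520
  // (520 / 8 = 65) would be rejected.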
2008 
2009   // Check if the machine opcodes are different. If they are, we consider the
2010   // loads not to share a base address, EXCEPT in the case of Thumb2 byte
2011   // loads where one is t2LDRBi8 and the other is t2LDRBi12. In that case they
2012   // are considered the same because they are just different encoding forms
2013   // of the same basic instruction.
2014   if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
2015       !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
2016          Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
2017         (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
2018          Load2->getMachineOpcode() == ARM::t2LDRBi8)))
2019     return false;  // FIXME: overly conservative?
2020 
2021   // Four loads in a row should be sufficient.
2022   if (NumLoads >= 3)
2023     return false;
2024 
2025   return true;
2026 }
2027 
2028 bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
2029                                             const MachineBasicBlock *MBB,
2030                                             const MachineFunction &MF) const {
2031   // Debug info is never a scheduling boundary. It's necessary to be explicit
2032   // due to the special treatment of IT instructions below, otherwise a
2033   // dbg_value followed by an IT will result in the IT instruction being
2034   // considered a scheduling hazard, which is wrong. It should be the actual
2035   // instruction preceding the dbg_value instruction(s), just like it is
2036   // when debug info is not present.
2037   if (MI.isDebugInstr())
2038     return false;
2039 
2040   // Terminators and labels can't be scheduled around.
2041   if (MI.isTerminator() || MI.isPosition())
2042     return true;
2043 
2044   // INLINEASM_BR can jump to another block
2045   if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
2046     return true;
2047 
2048   // Treat the start of the IT block as a scheduling boundary, but schedule
2049   // t2IT along with all instructions following it.
2050   // FIXME: This is a big hammer. But the alternative is to add all potential
2051   // true and anti dependencies to IT block instructions as implicit operands
2052   // to the t2IT instruction. The added compile time and complexity do not
2053   // seem worth it.
2054   MachineBasicBlock::const_iterator I = MI;
2055   // Make sure to skip any debug instructions
2056   while (++I != MBB->end() && I->isDebugInstr())
2057     ;
2058   if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
2059     return true;
2060 
2061   // Don't attempt to schedule around any instruction that defines
2062   // a stack-oriented pointer, as it's unlikely to be profitable. This
2063   // saves compile time, because it doesn't require every single
2064   // stack slot reference to depend on the instruction that does the
2065   // modification.
2066   // Calls don't actually change the stack pointer, even if they have imp-defs.
2067   // No ARM calling conventions change the stack pointer. (X86 calling
2068   // conventions sometimes do).
2069   if (!MI.isCall() && MI.definesRegister(ARM::SP))
2070     return true;
2071 
2072   return false;
2073 }
2074 
2075 bool ARMBaseInstrInfo::
2076 isProfitableToIfCvt(MachineBasicBlock &MBB,
2077                     unsigned NumCycles, unsigned ExtraPredCycles,
2078                     BranchProbability Probability) const {
2079   if (!NumCycles)
2080     return false;
2081 
2082   // If we are optimizing for size, see if the branch in the predecessor can be
2083   // lowered to cbn?z by the constant island lowering pass, and return false if
2084   // so. This results in a shorter instruction sequence.
2085   if (MBB.getParent()->getFunction().hasOptSize()) {
2086     MachineBasicBlock *Pred = *MBB.pred_begin();
2087     if (!Pred->empty()) {
2088       MachineInstr *LastMI = &*Pred->rbegin();
2089       if (LastMI->getOpcode() == ARM::t2Bcc) {
2090         const TargetRegisterInfo *TRI = &getRegisterInfo();
2091         MachineInstr *CmpMI = findCMPToFoldIntoCBZ(LastMI, TRI);
2092         if (CmpMI)
2093           return false;
2094       }
2095     }
2096   }
2097   return isProfitableToIfCvt(MBB, NumCycles, ExtraPredCycles,
2098                              MBB, 0, 0, Probability);
2099 }
2100 
2101 bool ARMBaseInstrInfo::
2102 isProfitableToIfCvt(MachineBasicBlock &TBB,
2103                     unsigned TCycles, unsigned TExtra,
2104                     MachineBasicBlock &FBB,
2105                     unsigned FCycles, unsigned FExtra,
2106                     BranchProbability Probability) const {
2107   if (!TCycles)
2108     return false;
2109 
2110   // In Thumb code we often end up trading one branch for an IT block, and
2111   // when the block has to be cloned this can increase code size. Prevent
2112   // blocks with multiple predecessors from being if-converted to avoid this
2113   // cloning.
2114   if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) {
2115     if (TBB.pred_size() != 1 || FBB.pred_size() != 1)
2116       return false;
2117   }
2118 
2119   // Attempt to estimate the relative costs of predication versus branching.
2120   // Here we scale up each component of UnpredCost to avoid precision issues
2121   // when scaling TCycles/FCycles by Probability.
2122   const unsigned ScalingUpFactor = 1024;
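  // Worked example (illustrative; the misprediction penalty of 4 is an
  // assumed value): for a triangle with TCycles = 2, FCycles = TExtra =
  // FExtra = 0, no branch predictor and a 50% probability, the unpredicated
  // cost is 0.5 * (2 + 1) * 1024 + 0.5 * 4 * 1024 = 3584, while the
  // predicated cost is 2 * 1024 = 2048, so if-conversion is profitable.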
2123 
2124   unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
2125   unsigned UnpredCost;
2126   if (!Subtarget.hasBranchPredictor()) {
2127     // When we don't have a branch predictor it's always cheaper to not take a
2128     // branch than take it, so we have to take that into account.
2129     unsigned NotTakenBranchCost = 1;
2130     unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
2131     unsigned TUnpredCycles, FUnpredCycles;
2132     if (!FCycles) {
2133       // Triangle: TBB is the fallthrough
2134       TUnpredCycles = TCycles + NotTakenBranchCost;
2135       FUnpredCycles = TakenBranchCost;
2136     } else {
2137       // Diamond: TBB is the block that is branched to, FBB is the fallthrough
2138       TUnpredCycles = TCycles + TakenBranchCost;
2139       FUnpredCycles = FCycles + NotTakenBranchCost;
2140       // The branch at the end of FBB will disappear when it's predicated, so
2141       // discount it from PredCost.
2142       PredCost -= 1 * ScalingUpFactor;
2143     }
2144     // The total cost is the cost of each path scaled by their probabilities.
2145     unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
2146     unsigned FUnpredCost = Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
2147     UnpredCost = TUnpredCost + FUnpredCost;
2148     // When predicating, assume that the first IT can be folded away but
2149     // later ones cost one cycle each.
2150     if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
2151       PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2152     }
2153   } else {
2154     unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor);
2155     unsigned FUnpredCost =
2156       Probability.getCompl().scale(FCycles * ScalingUpFactor);
2157     UnpredCost = TUnpredCost + FUnpredCost;
2158     UnpredCost += 1 * ScalingUpFactor; // The branch itself
2159     UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
2160   }
2161 
2162   return PredCost <= UnpredCost;
2163 }
2164 
2165 unsigned
2166 ARMBaseInstrInfo::extraSizeToPredicateInstructions(const MachineFunction &MF,
2167                                                    unsigned NumInsts) const {
2168   // Thumb2 needs a 2-byte IT instruction to predicate up to 4 instructions.
2169   // ARM has a condition code field in every predicable instruction, using it
2170   // doesn't change code size.
2171   if (!Subtarget.isThumb2())
2172     return 0;
2173 
2174   // It's possible that the size of the IT is restricted to a single block.
2175   unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
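  // Worked example (illustrative): predicating NumInsts = 5 instructions
  // needs divideCeil(5, 4) = 2 IT instructions, i.e. 4 extra bytes; with
  // restrictIT() it would be divideCeil(5, 1) * 2 = 10 bytes.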
2176   return divideCeil(NumInsts, MaxInsts) * 2;
2177 }
2178 
2179 unsigned
2180 ARMBaseInstrInfo::predictBranchSizeForIfCvt(MachineInstr &MI) const {
2181   // If this branch is likely to be folded into the comparison to form a
2182   // CB(N)Z, then removing it won't reduce code size at all, because that will
2183   // just replace the CB(N)Z with a CMP.
2184   if (MI.getOpcode() == ARM::t2Bcc &&
2185       findCMPToFoldIntoCBZ(&MI, &getRegisterInfo()))
2186     return 0;
2187 
2188   unsigned Size = getInstSizeInBytes(MI);
2189 
2190   // For Thumb2, all branches are 32-bit instructions during the if conversion
2191   // pass, but may be replaced with 16-bit instructions during size reduction.
2192   // Since the branches considered by if conversion tend to be forward branches
2193   // over small basic blocks, they are very likely to be in range for the
2194   // narrow instructions, so we assume the final code size will be half what it
2195   // currently is.
2196   if (Subtarget.isThumb2())
2197     Size /= 2;
2198 
2199   return Size;
2200 }
2201 
2202 bool
2203 ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
2204                                             MachineBasicBlock &FMBB) const {
2205   // Reduce false anti-dependencies to let the target's out-of-order execution
2206   // engine do its thing.
2207   return Subtarget.isProfitableToUnpredicate();
2208 }
2209 
2210 /// getInstrPredicate - If instruction is predicated, returns its predicate
2211 /// condition, otherwise returns AL. It also returns the condition code
2212 /// register by reference.
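// Illustrative example (not from the original comment): in MIR, a trailing
// predicate operand pair of "0, $cpsr" yields ARMCC::EQ with PredReg = CPSR,
// while "14, $noreg" means the instruction always executes and AL is returned.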
2213 ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI,
2214                                          Register &PredReg) {
2215   int PIdx = MI.findFirstPredOperandIdx();
2216   if (PIdx == -1) {
2217     PredReg = 0;
2218     return ARMCC::AL;
2219   }
2220 
2221   PredReg = MI.getOperand(PIdx+1).getReg();
2222   return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
2223 }
2224 
2225 unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) {
2226   if (Opc == ARM::B)
2227     return ARM::Bcc;
2228   if (Opc == ARM::tB)
2229     return ARM::tBcc;
2230   if (Opc == ARM::t2B)
2231     return ARM::t2Bcc;
2232 
2233   llvm_unreachable("Unknown unconditional branch opcode!");
2234 }
2235 
2236 MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI,
2237                                                        bool NewMI,
2238                                                        unsigned OpIdx1,
2239                                                        unsigned OpIdx2) const {
2240   switch (MI.getOpcode()) {
2241   case ARM::MOVCCr:
2242   case ARM::t2MOVCCr: {
2243     // MOVCC can be commuted by inverting the condition.
2244     Register PredReg;
2245     ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
2246     // MOVCC AL can't be inverted. Shouldn't happen.
2247     if (CC == ARMCC::AL || PredReg != ARM::CPSR)
2248       return nullptr;
2249     MachineInstr *CommutedMI =
2250         TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2251     if (!CommutedMI)
2252       return nullptr;
2253     // After swapping the MOVCC operands, also invert the condition.
2254     CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx())
2255         .setImm(ARMCC::getOppositeCondition(CC));
2256     return CommutedMI;
2257   }
2258   }
2259   return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2260 }
2261 
2262 /// Identify instructions that can be folded into a MOVCC instruction, and
2263 /// return the defining instruction.
2264 MachineInstr *
2265 ARMBaseInstrInfo::canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI,
2266                                    const TargetInstrInfo *TII) const {
2267   if (!Reg.isVirtual())
2268     return nullptr;
2269   if (!MRI.hasOneNonDBGUse(Reg))
2270     return nullptr;
2271   MachineInstr *MI = MRI.getVRegDef(Reg);
2272   if (!MI)
2273     return nullptr;
2274   // Check if MI can be predicated and folded into the MOVCC.
2275   if (!isPredicable(*MI))
2276     return nullptr;
2277   // Check if MI has any non-dead defs or physreg uses. This also detects
2278   // predicated instructions which will be reading CPSR.
2279   for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 1)) {
2280     // Reject frame index operands; PEI can't handle the predicated pseudos.
2281     if (MO.isFI() || MO.isCPI() || MO.isJTI())
2282       return nullptr;
2283     if (!MO.isReg())
2284       continue;
2285     // MI can't have any tied operands, that would conflict with predication.
2286     if (MO.isTied())
2287       return nullptr;
2288     if (Register::isPhysicalRegister(MO.getReg()))
2289       return nullptr;
2290     if (MO.isDef() && !MO.isDead())
2291       return nullptr;
2292   }
2293   bool DontMoveAcrossStores = true;
2294   if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
2295     return nullptr;
2296   return MI;
2297 }
2298 
2299 bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI,
2300                                      SmallVectorImpl<MachineOperand> &Cond,
2301                                      unsigned &TrueOp, unsigned &FalseOp,
2302                                      bool &Optimizable) const {
2303   assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
2304          "Unknown select instruction");
2305   // MOVCC operands:
2306   // 0: Def.
2307   // 1: True use.
2308   // 2: False use.
2309   // 3: Condition code.
2310   // 4: CPSR use.
2311   TrueOp = 1;
2312   FalseOp = 2;
2313   Cond.push_back(MI.getOperand(3));
2314   Cond.push_back(MI.getOperand(4));
2315   // We can always fold a def.
2316   Optimizable = true;
2317   return false;
2318 }
2319 
2320 MachineInstr *
2321 ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
2322                                  SmallPtrSetImpl<MachineInstr *> &SeenMIs,
2323                                  bool PreferFalse) const {
2324   assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
2325          "Unknown select instruction");
2326   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2327   MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this);
2328   bool Invert = !DefMI;
2329   if (!DefMI)
2330     DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);
2331   if (!DefMI)
2332     return nullptr;
2333 
2334   // Find new register class to use.
2335   MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
2336   MachineOperand TrueReg = MI.getOperand(Invert ? 1 : 2);
2337   Register DestReg = MI.getOperand(0).getReg();
2338   const TargetRegisterClass *FalseClass = MRI.getRegClass(FalseReg.getReg());
2339   const TargetRegisterClass *TrueClass = MRI.getRegClass(TrueReg.getReg());
2340   if (!MRI.constrainRegClass(DestReg, FalseClass))
2341     return nullptr;
2342   if (!MRI.constrainRegClass(DestReg, TrueClass))
2343     return nullptr;
2344 
2345   // Create a new predicated version of DefMI.
2346   // Rfalse is the first use.
2347   MachineInstrBuilder NewMI =
2348       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);
2349 
2350   // Copy all the DefMI operands, excluding its (null) predicate.
2351   const MCInstrDesc &DefDesc = DefMI->getDesc();
2352   for (unsigned i = 1, e = DefDesc.getNumOperands();
2353        i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
2354     NewMI.add(DefMI->getOperand(i));
2355 
2356   unsigned CondCode = MI.getOperand(3).getImm();
2357   if (Invert)
2358     NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
2359   else
2360     NewMI.addImm(CondCode);
2361   NewMI.add(MI.getOperand(4));
2362 
2363   // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
2364   if (NewMI->hasOptionalDef())
2365     NewMI.add(condCodeOp());
2366 
2367   // The output register value when the predicate is false is an implicit
2368   // register operand tied to the first def.
2369   // The tie makes the register allocator ensure the FalseReg is allocated the
2370   // same register as operand 0.
2371   FalseReg.setImplicit();
2372   NewMI.add(FalseReg);
2373   NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
2374 
2375   // Update SeenMIs set: register newly created MI and erase removed DefMI.
2376   SeenMIs.insert(NewMI);
2377   SeenMIs.erase(DefMI);
2378 
2379   // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
2380   // DefMI would be invalid when transferred inside the loop.  Checking for a
2381   // loop is expensive, but at least remove kill flags if they are in different
2382   // BBs.
2383   if (DefMI->getParent() != MI.getParent())
2384     NewMI->clearKillInfo();
2385 
2386   // The caller will erase MI, but not DefMI.
2387   DefMI->eraseFromParent();
2388   return NewMI;
2389 }
2390 
2391 /// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
2392 /// instruction is encoded with an 'S' bit is determined by the optional CPSR
2393 /// def operand.
2394 ///
2395 /// This will go away once we can teach tblgen how to set the optional CPSR def
2396 /// operand itself.
2397 struct AddSubFlagsOpcodePair {
2398   uint16_t PseudoOpc;
2399   uint16_t MachineOpc;
2400 };
2401 
2402 static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
2403   {ARM::ADDSri, ARM::ADDri},
2404   {ARM::ADDSrr, ARM::ADDrr},
2405   {ARM::ADDSrsi, ARM::ADDrsi},
2406   {ARM::ADDSrsr, ARM::ADDrsr},
2407 
2408   {ARM::SUBSri, ARM::SUBri},
2409   {ARM::SUBSrr, ARM::SUBrr},
2410   {ARM::SUBSrsi, ARM::SUBrsi},
2411   {ARM::SUBSrsr, ARM::SUBrsr},
2412 
2413   {ARM::RSBSri, ARM::RSBri},
2414   {ARM::RSBSrsi, ARM::RSBrsi},
2415   {ARM::RSBSrsr, ARM::RSBrsr},
2416 
2417   {ARM::tADDSi3, ARM::tADDi3},
2418   {ARM::tADDSi8, ARM::tADDi8},
2419   {ARM::tADDSrr, ARM::tADDrr},
2420   {ARM::tADCS, ARM::tADC},
2421 
2422   {ARM::tSUBSi3, ARM::tSUBi3},
2423   {ARM::tSUBSi8, ARM::tSUBi8},
2424   {ARM::tSUBSrr, ARM::tSUBrr},
2425   {ARM::tSBCS, ARM::tSBC},
2426   {ARM::tRSBS, ARM::tRSB},
2427   {ARM::tLSLSri, ARM::tLSLri},
2428 
2429   {ARM::t2ADDSri, ARM::t2ADDri},
2430   {ARM::t2ADDSrr, ARM::t2ADDrr},
2431   {ARM::t2ADDSrs, ARM::t2ADDrs},
2432 
2433   {ARM::t2SUBSri, ARM::t2SUBri},
2434   {ARM::t2SUBSrr, ARM::t2SUBrr},
2435   {ARM::t2SUBSrs, ARM::t2SUBrs},
2436 
2437   {ARM::t2RSBSri, ARM::t2RSBri},
2438   {ARM::t2RSBSrs, ARM::t2RSBrs},
2439 };
2440 
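// Usage sketch (illustrative): convertAddSubFlagsOpcode(ARM::ADDSri) returns
// ARM::ADDri per the table above, while an opcode with no entry in the table
// returns 0 to signal that no mapping exists.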
2441 unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
2442   for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
2443     if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
2444       return AddSubFlagsOpcodeMap[i].MachineOpc;
2445   return 0;
2446 }
2447 
2448 void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
2449                                    MachineBasicBlock::iterator &MBBI,
2450                                    const DebugLoc &dl, Register DestReg,
2451                                    Register BaseReg, int NumBytes,
2452                                    ARMCC::CondCodes Pred, Register PredReg,
2453                                    const ARMBaseInstrInfo &TII,
2454                                    unsigned MIFlags) {
2455   if (NumBytes == 0 && DestReg != BaseReg) {
2456     BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
2457         .addReg(BaseReg, RegState::Kill)
2458         .add(predOps(Pred, PredReg))
2459         .add(condCodeOp())
2460         .setMIFlags(MIFlags);
2461     return;
2462   }
2463 
2464   bool isSub = NumBytes < 0;
2465   if (isSub) NumBytes = -NumBytes;
2466 
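  // Illustrative example (an assumption, not from the original source): a
  // NumBytes of 4097 (0x1001) is not a single SO immediate, so the loop below
  // splits it into SO-encodable chunks (1 and 4096) and emits two ADDri
  // instructions chained through DestReg.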
2467   while (NumBytes) {
2468     unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
2469     unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
2470     assert(ThisVal && "Didn't extract field correctly");
2471 
2472     // We will handle these bits from offset, clear them.
2473     NumBytes &= ~ThisVal;
2474 
2475     assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
2476 
2477     // Build the new ADD / SUB.
2478     unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2479     BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
2480         .addReg(BaseReg, RegState::Kill)
2481         .addImm(ThisVal)
2482         .add(predOps(Pred, PredReg))
2483         .add(condCodeOp())
2484         .setMIFlags(MIFlags);
2485     BaseReg = DestReg;
2486   }
2487 }
2488 
2489 bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
2490                                       MachineFunction &MF, MachineInstr *MI,
2491                                       unsigned NumBytes) {
2492   // This optimisation potentially adds lots of load and store
2493   // micro-operations; it's really only a benefit for code size.
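  // Illustrative example (an assumption, not from the original source): a
  // "sub sp, sp, #8" following "push {r4, r5}" can be folded by rewriting the
  // push as "push {r2, r3, r4, r5}", where r2/r3 are pushed as undef purely
  // to reserve the extra 8 bytes, letting the caller drop the separate SP
  // update.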
2494   if (!Subtarget.hasMinSize())
2495     return false;
2496 
2497   // If only one register is pushed/popped, LLVM can use an LDR/STR
2498   // instead. We can't modify those so make sure we're dealing with an
2499   // instruction we understand.
2500   bool IsPop = isPopOpcode(MI->getOpcode());
2501   bool IsPush = isPushOpcode(MI->getOpcode());
2502   if (!IsPush && !IsPop)
2503     return false;
2504 
2505   bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
2506                       MI->getOpcode() == ARM::VLDMDIA_UPD;
2507   bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
2508                      MI->getOpcode() == ARM::tPOP ||
2509                      MI->getOpcode() == ARM::tPOP_RET;
2510 
2511   assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
2512                           MI->getOperand(1).getReg() == ARM::SP)) &&
2513          "trying to fold sp update into non-sp-updating push/pop");
2514 
2515   // The VFP push & pop act on D-registers, so we can only correctly fold an
2516   // adjustment that is a multiple of 8 bytes. Similarly, each rN is 4 bytes.
2517   // Don't try if this is violated.
2518   if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2519     return false;
2520 
2521   // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
2522   // pred) so the list starts at 4. Thumb1 starts after the predicate.
2523   int RegListIdx = IsT1PushPop ? 2 : 4;
2524 
2525   // Calculate the space we'll need in terms of registers.
2526   unsigned RegsNeeded;
2527   const TargetRegisterClass *RegClass;
2528   if (IsVFPPushPop) {
2529     RegsNeeded = NumBytes / 8;
2530     RegClass = &ARM::DPRRegClass;
2531   } else {
2532     RegsNeeded = NumBytes / 4;
2533     RegClass = &ARM::GPRRegClass;
2534   }
2535 
2536   // We're going to have to strip all list operands off before
2537   // re-adding them since the order matters, so save the existing ones
2538   // for later.
2539   SmallVector<MachineOperand, 4> RegList;
2540 
2541   // We're also going to need the first register transferred by this
2542   // instruction, which won't necessarily be the first register in the list.
2543   unsigned FirstRegEnc = -1;
2544 
2545   const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
2546   for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2547     MachineOperand &MO = MI->getOperand(i);
2548     RegList.push_back(MO);
2549 
2550     if (MO.isReg() && !MO.isImplicit() &&
2551         TRI->getEncodingValue(MO.getReg()) < FirstRegEnc)
2552       FirstRegEnc = TRI->getEncodingValue(MO.getReg());
2553   }
2554 
2555   const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
2556 
2557   // Now try to find enough space in the reglist to allocate NumBytes.
2558   for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2559        --CurRegEnc) {
2560     unsigned CurReg = RegClass->getRegister(CurRegEnc);
2561     if (IsT1PushPop && CurRegEnc > TRI->getEncodingValue(ARM::R7))
2562       continue;
2563     if (!IsPop) {
2564       // Pushing any register is completely harmless; mark the register
2565       // involved as undef since we don't care about its value and must not
2566       // restore it during stack unwinding.
2567       RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
2568                                                   false, false, true));
2569       --RegsNeeded;
2570       continue;
2571     }
2572 
2573     // However, we can only pop an extra register if it's not live. For
2574     // registers live within the function we might clobber a return value
2575     // register; the other way a register can be live here is if it's
2576     // callee-saved.
2577     if (isCalleeSavedRegister(CurReg, CSRegs) ||
2578         MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) !=
2579         MachineBasicBlock::LQR_Dead) {
2580       // VFP pops don't allow holes in the register list, so any skip is fatal
2581       // for our transformation. GPR pops do, so we should just keep looking.
2582       if (IsVFPPushPop)
2583         return false;
2584       else
2585         continue;
2586     }
2587 
2588     // Mark the unimportant registers as <def,dead> in the POP.
2589     RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
2590                                                 true));
2591     --RegsNeeded;
2592   }
2593 
2594   if (RegsNeeded > 0)
2595     return false;
2596 
2597   // Finally we know we can profitably perform the optimisation so go
2598   // ahead: strip all existing registers off and add them back again
2599   // in the right order.
2600   for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
2601     MI->RemoveOperand(i);
2602 
2603   // Add the complete list back in.
2604   MachineInstrBuilder MIB(MF, &*MI);
2605   for (const MachineOperand &MO : llvm::reverse(RegList))
2606     MIB.add(MO);
2607 
2608   return true;
2609 }
2610 
2611 bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
2612                                 Register FrameReg, int &Offset,
2613                                 const ARMBaseInstrInfo &TII) {
2614   unsigned Opcode = MI.getOpcode();
2615   const MCInstrDesc &Desc = MI.getDesc();
2616   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
2617   bool isSub = false;
2618 
2619   // Memory operands in inline assembly always use AddrMode2.
2620   if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2621     AddrMode = ARMII::AddrMode2;
2622 
2623   if (Opcode == ARM::ADDri) {
2624     Offset += MI.getOperand(FrameRegIdx+1).getImm();
2625     if (Offset == 0) {
2626       // Turn it into a move.
2627       MI.setDesc(TII.get(ARM::MOVr));
2628       MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2629       MI.RemoveOperand(FrameRegIdx+1);
2630       Offset = 0;
2631       return true;
2632     } else if (Offset < 0) {
2633       Offset = -Offset;
2634       isSub = true;
2635       MI.setDesc(TII.get(ARM::SUBri));
2636     }
2637 
2638     // Common case: small offset, fits into instruction.
2639     if (ARM_AM::getSOImmVal(Offset) != -1) {
2640       // Replace the FrameIndex with sp / fp
2641       MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2642       MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
2643       Offset = 0;
2644       return true;
2645     }
2646 
2647     // Otherwise, pull as much of the immediate into this ADDri/SUBri
2648     // as possible.
2649     unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
2650     unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
2651 
2652     // We will handle these bits from offset, clear them.
2653     Offset &= ~ThisImmVal;
2654 
2655     // Get the properly encoded SOImmVal field.
2656     assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
2657            "Bit extraction didn't work?");
2658     MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2659  } else {
2660     unsigned ImmIdx = 0;
2661     int InstrOffs = 0;
2662     unsigned NumBits = 0;
2663     unsigned Scale = 1;
2664     switch (AddrMode) {
2665     case ARMII::AddrMode_i12:
2666       ImmIdx = FrameRegIdx + 1;
2667       InstrOffs = MI.getOperand(ImmIdx).getImm();
2668       NumBits = 12;
2669       break;
2670     case ARMII::AddrMode2:
2671       ImmIdx = FrameRegIdx+2;
2672       InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
2673       if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2674         InstrOffs *= -1;
2675       NumBits = 12;
2676       break;
2677     case ARMII::AddrMode3:
2678       ImmIdx = FrameRegIdx+2;
2679       InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
2680       if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2681         InstrOffs *= -1;
2682       NumBits = 8;
2683       break;
2684     case ARMII::AddrMode4:
2685     case ARMII::AddrMode6:
2686       // Can't fold any offset even if it's zero.
2687       return false;
2688     case ARMII::AddrMode5:
2689       ImmIdx = FrameRegIdx+1;
2690       InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2691       if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2692         InstrOffs *= -1;
2693       NumBits = 8;
2694       Scale = 4;
2695       break;
2696     case ARMII::AddrMode5FP16:
2697       ImmIdx = FrameRegIdx+1;
2698       InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2699       if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2700         InstrOffs *= -1;
2701       NumBits = 8;
2702       Scale = 2;
2703       break;
2704     case ARMII::AddrModeT2_i7:
2705     case ARMII::AddrModeT2_i7s2:
2706     case ARMII::AddrModeT2_i7s4:
2707       ImmIdx = FrameRegIdx+1;
2708       InstrOffs = MI.getOperand(ImmIdx).getImm();
2709       NumBits = 7;
2710       Scale = (AddrMode == ARMII::AddrModeT2_i7s2 ? 2 :
2711                AddrMode == ARMII::AddrModeT2_i7s4 ? 4 : 1);
2712       break;
2713     default:
2714       llvm_unreachable("Unsupported addressing mode!");
2715     }
2716 
2717     Offset += InstrOffs * Scale;
2718     assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
2719     if (Offset < 0) {
2720       Offset = -Offset;
2721       isSub = true;
2722     }
2723 
2724     // Attempt to fold the address computation if the opcode has offset bits.
2725     if (NumBits > 0) {
2726       // Common case: small offset, fits into instruction.
2727       MachineOperand &ImmOp = MI.getOperand(ImmIdx);
2728       int ImmedOffset = Offset / Scale;
2729       unsigned Mask = (1 << NumBits) - 1;
2730       if ((unsigned)Offset <= Mask * Scale) {
2731         // Replace the FrameIndex with sp
2732         MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2733         // FIXME: When addrmode2 goes away, this will simplify (like the
2734         // T2 version), as the LDR.i12 versions don't need the encoding
2735         // tricks for the offset value.
2736         if (isSub) {
2737           if (AddrMode == ARMII::AddrMode_i12)
2738             ImmedOffset = -ImmedOffset;
2739           else
2740             ImmedOffset |= 1 << NumBits;
2741         }
2742         ImmOp.ChangeToImmediate(ImmedOffset);
2743         Offset = 0;
2744         return true;
2745       }
2746 
2747       // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
2748       ImmedOffset = ImmedOffset & Mask;
2749       if (isSub) {
2750         if (AddrMode == ARMII::AddrMode_i12)
2751           ImmedOffset = -ImmedOffset;
2752         else
2753           ImmedOffset |= 1 << NumBits;
2754       }
2755       ImmOp.ChangeToImmediate(ImmedOffset);
2756       Offset &= ~(Mask*Scale);
2757     }
2758   }
2759 
2760   Offset = (isSub) ? -Offset : Offset;
2761   return Offset == 0;
2762 }
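
// Illustrative walk-through of the offset folding above (hypothetical values,
// not part of the original source): for AddrMode_i12 (NumBits = 12, Scale = 1)
// and a combined offset of 5000, the mask is (1 << 12) - 1 = 4095, so 5000 does
// not fit. The low bits 5000 & 4095 = 904 are folded into the instruction's
// immediate, the remaining 4096 stays in Offset, and the function returns false
// so the caller must materialize the leftover part separately.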
2763 
2764 /// analyzeCompare - For a comparison instruction, return the source registers
2765 /// in SrcReg and SrcReg2 if having two register operands, and the value it
2766 /// compares against in CmpValue. Return true if the comparison instruction
2767 /// can be analyzed.
2768 bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
2769                                       Register &SrcReg2, int64_t &CmpMask,
2770                                       int64_t &CmpValue) const {
2771   switch (MI.getOpcode()) {
2772   default: break;
2773   case ARM::CMPri:
2774   case ARM::t2CMPri:
2775   case ARM::tCMPi8:
2776     SrcReg = MI.getOperand(0).getReg();
2777     SrcReg2 = 0;
2778     CmpMask = ~0;
2779     CmpValue = MI.getOperand(1).getImm();
2780     return true;
2781   case ARM::CMPrr:
2782   case ARM::t2CMPrr:
2783   case ARM::tCMPr:
2784     SrcReg = MI.getOperand(0).getReg();
2785     SrcReg2 = MI.getOperand(1).getReg();
2786     CmpMask = ~0;
2787     CmpValue = 0;
2788     return true;
2789   case ARM::TSTri:
2790   case ARM::t2TSTri:
2791     SrcReg = MI.getOperand(0).getReg();
2792     SrcReg2 = 0;
2793     CmpMask = MI.getOperand(1).getImm();
2794     CmpValue = 0;
2795     return true;
2796   }
2797 
2798   return false;
2799 }
2800 
2801 /// isSuitableForMask - Identify a suitable 'and' instruction that
2802 /// operates on the given source register and applies the same mask
2803 /// as a 'tst' instruction. Provide a limited look-through for copies.
2804 /// When successful, MI will hold the found instruction.
2805 static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg,
2806                               int CmpMask, bool CommonUse) {
2807   switch (MI->getOpcode()) {
2808     case ARM::ANDri:
2809     case ARM::t2ANDri:
2810       if (CmpMask != MI->getOperand(2).getImm())
2811         return false;
2812       if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
2813         return true;
2814       break;
2815   }
2816 
2817   return false;
2818 }
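
// Illustrative example (hypothetical vregs): given
//   %1 = t2ANDri %0, 255, ...     and later     t2TSTri %0, 255, ...
// the AND applies the same mask (255) to the same source register as the TST,
// so isSuitableForMask identifies it as a flag-setting replacement candidate.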
2819 
2820 /// getCmpToAddCondition - assuming the flags are set by CMP(a,b), return
2821 /// the condition code to use if we instead modify the instruction so that
2822 /// the flags are set by ADD(a,b,X).
2823 inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) {
2824   switch (CC) {
2825   default: return ARMCC::AL;
2826   case ARMCC::HS: return ARMCC::LO;
2827   case ARMCC::LO: return ARMCC::HS;
2828   case ARMCC::VS: return ARMCC::VS;
2829   case ARMCC::VC: return ARMCC::VC;
2830   }
2831 }
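
// Per the table above, only the carry-based conditions swap (HS <-> LO) and the
// overflow-based ones (VS, VC) are kept unchanged; every other condition maps
// to ARMCC::AL, which the caller treats as "cannot rewrite".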
2832 
2833 /// isRedundantFlagInstr - check whether the first instruction, whose only
2834 /// purpose is to update flags, can be made redundant.
2835 /// CMPrr can be made redundant by SUBrr if the operands are the same.
2836 /// CMPri can be made redundant by SUBri if the operands are the same.
2837 /// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X).
2838 /// This function can be extended later on.
2839 inline static bool isRedundantFlagInstr(const MachineInstr *CmpI,
2840                                         Register SrcReg, Register SrcReg2,
2841                                         int64_t ImmValue,
2842                                         const MachineInstr *OI,
2843                                         bool &IsThumb1) {
2844   if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
2845       (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) &&
2846       ((OI->getOperand(1).getReg() == SrcReg &&
2847         OI->getOperand(2).getReg() == SrcReg2) ||
2848        (OI->getOperand(1).getReg() == SrcReg2 &&
2849         OI->getOperand(2).getReg() == SrcReg))) {
2850     IsThumb1 = false;
2851     return true;
2852   }
2853 
2854   if (CmpI->getOpcode() == ARM::tCMPr && OI->getOpcode() == ARM::tSUBrr &&
2855       ((OI->getOperand(2).getReg() == SrcReg &&
2856         OI->getOperand(3).getReg() == SrcReg2) ||
2857        (OI->getOperand(2).getReg() == SrcReg2 &&
2858         OI->getOperand(3).getReg() == SrcReg))) {
2859     IsThumb1 = true;
2860     return true;
2861   }
2862 
2863   if ((CmpI->getOpcode() == ARM::CMPri || CmpI->getOpcode() == ARM::t2CMPri) &&
2864       (OI->getOpcode() == ARM::SUBri || OI->getOpcode() == ARM::t2SUBri) &&
2865       OI->getOperand(1).getReg() == SrcReg &&
2866       OI->getOperand(2).getImm() == ImmValue) {
2867     IsThumb1 = false;
2868     return true;
2869   }
2870 
2871   if (CmpI->getOpcode() == ARM::tCMPi8 &&
2872       (OI->getOpcode() == ARM::tSUBi8 || OI->getOpcode() == ARM::tSUBi3) &&
2873       OI->getOperand(2).getReg() == SrcReg &&
2874       OI->getOperand(3).getImm() == ImmValue) {
2875     IsThumb1 = true;
2876     return true;
2877   }
2878 
2879   if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
2880       (OI->getOpcode() == ARM::ADDrr || OI->getOpcode() == ARM::t2ADDrr ||
2881        OI->getOpcode() == ARM::ADDri || OI->getOpcode() == ARM::t2ADDri) &&
2882       OI->getOperand(0).isReg() && OI->getOperand(1).isReg() &&
2883       OI->getOperand(0).getReg() == SrcReg &&
2884       OI->getOperand(1).getReg() == SrcReg2) {
2885     IsThumb1 = false;
2886     return true;
2887   }
2888 
2889   if (CmpI->getOpcode() == ARM::tCMPr &&
2890       (OI->getOpcode() == ARM::tADDi3 || OI->getOpcode() == ARM::tADDi8 ||
2891        OI->getOpcode() == ARM::tADDrr) &&
2892       OI->getOperand(0).getReg() == SrcReg &&
2893       OI->getOperand(2).getReg() == SrcReg2) {
2894     IsThumb1 = true;
2895     return true;
2896   }
2897 
2898   return false;
2899 }
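
// Illustrative example (hypothetical vregs): for
//   %2 = SUBrr %0, %1    ...    CMPrr %1, %0
// the operands match in swapped order, so the compare can be made redundant by
// turning the SUB into a flag-setting instruction; optimizeCompareInstr below
// then swaps the condition codes of the CPSR users.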
2900 
2901 static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) {
2902   switch (MI->getOpcode()) {
2903   default: return false;
2904   case ARM::tLSLri:
2905   case ARM::tLSRri:
2906   case ARM::tLSLrr:
2907   case ARM::tLSRrr:
2908   case ARM::tSUBrr:
2909   case ARM::tADDrr:
2910   case ARM::tADDi3:
2911   case ARM::tADDi8:
2912   case ARM::tSUBi3:
2913   case ARM::tSUBi8:
2914   case ARM::tMUL:
2915   case ARM::tADC:
2916   case ARM::tSBC:
2917   case ARM::tRSB:
2918   case ARM::tAND:
2919   case ARM::tORR:
2920   case ARM::tEOR:
2921   case ARM::tBIC:
2922   case ARM::tMVN:
2923   case ARM::tASRri:
2924   case ARM::tASRrr:
2925   case ARM::tROR:
2926     IsThumb1 = true;
2927     LLVM_FALLTHROUGH;
2928   case ARM::RSBrr:
2929   case ARM::RSBri:
2930   case ARM::RSCrr:
2931   case ARM::RSCri:
2932   case ARM::ADDrr:
2933   case ARM::ADDri:
2934   case ARM::ADCrr:
2935   case ARM::ADCri:
2936   case ARM::SUBrr:
2937   case ARM::SUBri:
2938   case ARM::SBCrr:
2939   case ARM::SBCri:
2940   case ARM::t2RSBri:
2941   case ARM::t2ADDrr:
2942   case ARM::t2ADDri:
2943   case ARM::t2ADCrr:
2944   case ARM::t2ADCri:
2945   case ARM::t2SUBrr:
2946   case ARM::t2SUBri:
2947   case ARM::t2SBCrr:
2948   case ARM::t2SBCri:
2949   case ARM::ANDrr:
2950   case ARM::ANDri:
2951   case ARM::t2ANDrr:
2952   case ARM::t2ANDri:
2953   case ARM::ORRrr:
2954   case ARM::ORRri:
2955   case ARM::t2ORRrr:
2956   case ARM::t2ORRri:
2957   case ARM::EORrr:
2958   case ARM::EORri:
2959   case ARM::t2EORrr:
2960   case ARM::t2EORri:
2961   case ARM::t2LSRri:
2962   case ARM::t2LSRrr:
2963   case ARM::t2LSLri:
2964   case ARM::t2LSLrr:
2965     return true;
2966   }
2967 }
2968 
2969 /// optimizeCompareInstr - Convert the instruction supplying the argument to the
2970 /// comparison into one that sets the zero bit in the flags register, and
2971 /// remove the redundant compare instruction if an earlier instruction can set
2972 /// the flags in the same way as the compare.
2973 /// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
2974 /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
2975 /// condition code of instructions which use the flags.
2976 bool ARMBaseInstrInfo::optimizeCompareInstr(
2977     MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask,
2978     int64_t CmpValue, const MachineRegisterInfo *MRI) const {
2979   // Get the unique definition of SrcReg.
2980   MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
2981   if (!MI) return false;
2982 
2983   // Masked compares sometimes use the same register as the corresponding 'and'.
2984   if (CmpMask != ~0) {
2985     if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) {
2986       MI = nullptr;
2987       for (MachineRegisterInfo::use_instr_iterator
2988            UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
2989            UI != UE; ++UI) {
2990         if (UI->getParent() != CmpInstr.getParent())
2991           continue;
2992         MachineInstr *PotentialAND = &*UI;
2993         if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
2994             isPredicated(*PotentialAND))
2995           continue;
2996         MI = PotentialAND;
2997         break;
2998       }
2999       if (!MI) return false;
3000     }
3001   }
3002 
3003   // Get ready to iterate backward from CmpInstr.
3004   MachineBasicBlock::iterator I = CmpInstr, E = MI,
3005                               B = CmpInstr.getParent()->begin();
3006 
3007   // Early exit if CmpInstr is at the beginning of the BB.
3008   if (I == B) return false;
3009 
3010   // There are two possible candidates which can be changed to set CPSR:
3011   // One is MI, the other is a SUB or ADD instruction.
3012   // For CMPrr(r1,r2), we are looking for SUB(r1,r2), SUB(r2,r1), or
3013   // ADDr[ri](r1, r2, X).
3014   // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
3015   MachineInstr *SubAdd = nullptr;
3016   if (SrcReg2 != 0)
3017     // MI is not a candidate for CMPrr.
3018     MI = nullptr;
3019   else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {
3020     // Conservatively refuse to convert an instruction which isn't in the same
3021     // BB as the comparison.
3022     // For CMPri w/ CmpValue != 0, a SubAdd may still be a candidate.
3023     // Thus we cannot return here.
3024     if (CmpInstr.getOpcode() == ARM::CMPri ||
3025         CmpInstr.getOpcode() == ARM::t2CMPri ||
3026         CmpInstr.getOpcode() == ARM::tCMPi8)
3027       MI = nullptr;
3028     else
3029       return false;
3030   }
3031 
3032   bool IsThumb1 = false;
3033   if (MI && !isOptimizeCompareCandidate(MI, IsThumb1))
3034     return false;
3035 
3036   // We also want to do this peephole for cases like this: if (a*b == 0),
3037   // and optimise away the CMP instruction from the generated code sequence:
3038   // MULS, MOVS, MOVS, CMP. Here the MOVS instructions load the boolean values
3039   // resulting from the select instruction, but these MOVS instructions for
3040   // Thumb1 (V6M) are flag setting and are thus preventing this optimisation.
3041   // However, if we only have MOVS instructions in between the CMP and the
3042   // other instruction (the MULS in this example), then the CPSR is dead so we
3043   // can safely reorder the sequence into: MOVS, MOVS, MULS, CMP. We do this
3044   // reordering and then continue the analysis hoping we can eliminate the
3045   // CMP. This peephole works on the vregs, so is still in SSA form. As a
3046   // consequence, the movs won't redefine/kill the MUL operands which would
3047   // make this reordering illegal.
3048   const TargetRegisterInfo *TRI = &getRegisterInfo();
3049   if (MI && IsThumb1) {
3050     --I;
3051     if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) {
3052       bool CanReorder = true;
3053       for (; I != E; --I) {
3054         if (I->getOpcode() != ARM::tMOVi8) {
3055           CanReorder = false;
3056           break;
3057         }
3058       }
3059       if (CanReorder) {
3060         MI = MI->removeFromParent();
3061         E = CmpInstr;
3062         CmpInstr.getParent()->insert(E, MI);
3063       }
3064     }
3065     I = CmpInstr;
3066     E = MI;
3067   }
3068 
3069   // Check that CPSR isn't set between the comparison instruction and the one we
3070   // want to change. At the same time, search for SubAdd.
3071   bool SubAddIsThumb1 = false;
3072   do {
3073     const MachineInstr &Instr = *--I;
3074 
3075     // Check whether CmpInstr can be made redundant by the current instruction.
3076     if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &Instr,
3077                              SubAddIsThumb1)) {
3078       SubAdd = &*I;
3079       break;
3080     }
3081 
3082     // Allow E (which was initially MI) to be SubAdd but do not search before E.
3083     if (I == E)
3084       break;
3085 
3086     if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
3087         Instr.readsRegister(ARM::CPSR, TRI))
3088       // This instruction modifies or uses CPSR after the one we want to
3089       // change. We can't do this transformation.
3090       return false;
3091 
3092     if (I == B) {
3093       // In some cases, we scan the use-list of an instruction for an AND;
3094       // that AND is in the same BB, but may not be scheduled before the
3095       // corresponding TST.  In that case, bail out.
3096       //
3097       // FIXME: We could try to reschedule the AND.
3098       return false;
3099     }
3100   } while (true);
3101 
3102   // Return false if no candidates exist.
3103   if (!MI && !SubAdd)
3104     return false;
3105 
3106   // If we found a SubAdd, use it as it will be closer to the CMP
3107   if (SubAdd) {
3108     MI = SubAdd;
3109     IsThumb1 = SubAddIsThumb1;
3110   }
3111 
3112   // We can't use a predicated instruction - it doesn't always write the flags.
3113   if (isPredicated(*MI))
3114     return false;
3115 
3116   // Scan forward for the use of CPSR.
3117   // When checking against MI: if it's a condition code that requires
3118   // checking of the V bit or C bit, then this is not safe to do.
3119   // It is safe to remove CmpInstr if CPSR is redefined or killed.
3120   // If we are done with the basic block, we need to check whether CPSR is
3121   // live-out.
3122   SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
3123       OperandsToUpdate;
3124   bool isSafe = false;
3125   I = CmpInstr;
3126   E = CmpInstr.getParent()->end();
3127   while (!isSafe && ++I != E) {
3128     const MachineInstr &Instr = *I;
3129     for (unsigned IO = 0, EO = Instr.getNumOperands();
3130          !isSafe && IO != EO; ++IO) {
3131       const MachineOperand &MO = Instr.getOperand(IO);
3132       if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
3133         isSafe = true;
3134         break;
3135       }
3136       if (!MO.isReg() || MO.getReg() != ARM::CPSR)
3137         continue;
3138       if (MO.isDef()) {
3139         isSafe = true;
3140         break;
3141       }
3142       // The condition code is the operand just before CPSR, except for VSELs.
3143       ARMCC::CondCodes CC;
3144       bool IsInstrVSel = true;
3145       switch (Instr.getOpcode()) {
3146       default:
3147         IsInstrVSel = false;
3148         CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm();
3149         break;
3150       case ARM::VSELEQD:
3151       case ARM::VSELEQS:
3152       case ARM::VSELEQH:
3153         CC = ARMCC::EQ;
3154         break;
3155       case ARM::VSELGTD:
3156       case ARM::VSELGTS:
3157       case ARM::VSELGTH:
3158         CC = ARMCC::GT;
3159         break;
3160       case ARM::VSELGED:
3161       case ARM::VSELGES:
3162       case ARM::VSELGEH:
3163         CC = ARMCC::GE;
3164         break;
3165       case ARM::VSELVSD:
3166       case ARM::VSELVSS:
3167       case ARM::VSELVSH:
3168         CC = ARMCC::VS;
3169         break;
3170       }
3171 
3172       if (SubAdd) {
3173         // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
3174         // on CMP needs to be updated to be based on SUB.
3175         // If we have ADD(r1, r2, X) and CMP(r1, r2), the condition code also
3176         // needs to be modified.
3177         // Push the condition code operands to OperandsToUpdate.
3178         // If it is safe to remove CmpInstr, the condition code of these
3179         // operands will be modified.
3180         unsigned Opc = SubAdd->getOpcode();
3181         bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
3182                      Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
3183                      Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
3184                      Opc == ARM::tSUBi8;
3185         unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
3186         if (!IsSub ||
3187             (SrcReg2 != 0 && SubAdd->getOperand(OpI).getReg() == SrcReg2 &&
3188              SubAdd->getOperand(OpI + 1).getReg() == SrcReg)) {
3189           // VSel doesn't support condition code update.
3190           if (IsInstrVSel)
3191             return false;
3192           // Ensure we can swap the condition.
3193           ARMCC::CondCodes NewCC = (IsSub ? getSwappedCondition(CC) : getCmpToAddCondition(CC));
3194           if (NewCC == ARMCC::AL)
3195             return false;
3196           OperandsToUpdate.push_back(
3197               std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3198         }
3199       } else {
3200         // No SubAdd, so this is x = <op> y, z; cmp x, 0.
3201         switch (CC) {
3202         case ARMCC::EQ: // Z
3203         case ARMCC::NE: // Z
3204         case ARMCC::MI: // N
3205         case ARMCC::PL: // N
3206         case ARMCC::AL: // none
3207           // CPSR can be used multiple times, we should continue.
3208           break;
3209         case ARMCC::HS: // C
3210         case ARMCC::LO: // C
3211         case ARMCC::VS: // V
3212         case ARMCC::VC: // V
3213         case ARMCC::HI: // C Z
3214         case ARMCC::LS: // C Z
3215         case ARMCC::GE: // N V
3216         case ARMCC::LT: // N V
3217         case ARMCC::GT: // Z N V
3218         case ARMCC::LE: // Z N V
3219           // The instruction uses the V bit or C bit which is not safe.
3220           return false;
3221         }
3222       }
3223     }
3224   }
3225 
3226   // If CPSR is not killed nor re-defined, we should check whether it is
3227   // live-out. If it is live-out, do not optimize.
3228   if (!isSafe) {
3229     MachineBasicBlock *MBB = CmpInstr.getParent();
3230     for (MachineBasicBlock *Succ : MBB->successors())
3231       if (Succ->isLiveIn(ARM::CPSR))
3232         return false;
3233   }
3234 
3235   // Toggle the optional operand to CPSR (if it exists - in Thumb1 we always
3236   // set CPSR so this is represented as an explicit output)
3237   if (!IsThumb1) {
3238     MI->getOperand(5).setReg(ARM::CPSR);
3239     MI->getOperand(5).setIsDef(true);
3240   }
3241   assert(!isPredicated(*MI) && "Can't use flags from predicated instruction");
3242   CmpInstr.eraseFromParent();
3243 
3244   // Modify the condition code of operands in OperandsToUpdate.
3245   // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
3246   // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
3247   for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
3248     OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
3249 
3250   MI->clearRegisterDeads(ARM::CPSR);
3251 
3252   return true;
3253 }
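
// Sketch of the overall effect (hypothetical vregs), assuming a SUB of the same
// operands precedes a CMP that uses them in swapped order:
//   %2 = SUBrr %0, %1 ; ... ; CMPrr %1, %0 ; users predicated on GT
// becomes: the SUB's optional CPSR operand is turned into a definition (so the
// SUB now sets the flags), the CMP is erased, and the GT condition codes of the
// CPSR users are replaced by their swapped form (LT).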
3254 
3255 bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const {
3256   // Do not sink MI if it might be used to optimize a redundant compare.
3257   // We heuristically only look at the instruction immediately following MI to
3258   // avoid potentially searching the entire basic block.
3259   if (isPredicated(MI))
3260     return true;
3261   MachineBasicBlock::const_iterator Next = &MI;
3262   ++Next;
3263   Register SrcReg, SrcReg2;
3264   int64_t CmpMask, CmpValue;
3265   bool IsThumb1;
3266   if (Next != MI.getParent()->end() &&
3267       analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) &&
3268       isRedundantFlagInstr(&*Next, SrcReg, SrcReg2, CmpValue, &MI, IsThumb1))
3269     return false;
3270   return true;
3271 }
3272 
3273 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
3274                                      Register Reg,
3275                                      MachineRegisterInfo *MRI) const {
3276   // Fold large immediates into add, sub, or, xor.
3277   unsigned DefOpc = DefMI.getOpcode();
3278   if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
3279     return false;
3280   if (!DefMI.getOperand(1).isImm())
3281     // Could be t2MOVi32imm @xx
3282     return false;
3283 
3284   if (!MRI->hasOneNonDBGUse(Reg))
3285     return false;
3286 
3287   const MCInstrDesc &DefMCID = DefMI.getDesc();
3288   if (DefMCID.hasOptionalDef()) {
3289     unsigned NumOps = DefMCID.getNumOperands();
3290     const MachineOperand &MO = DefMI.getOperand(NumOps - 1);
3291     if (MO.getReg() == ARM::CPSR && !MO.isDead())
3292       // If DefMI defines CPSR and it is not dead, it's obviously not safe
3293       // to delete DefMI.
3294       return false;
3295   }
3296 
3297   const MCInstrDesc &UseMCID = UseMI.getDesc();
3298   if (UseMCID.hasOptionalDef()) {
3299     unsigned NumOps = UseMCID.getNumOperands();
3300     if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR)
3301       // If the instruction sets the flag, do not attempt this optimization
3302       // since it may change the semantics of the code.
3303       return false;
3304   }
3305 
3306   unsigned UseOpc = UseMI.getOpcode();
3307   unsigned NewUseOpc = 0;
3308   uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm();
3309   uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3310   bool Commute = false;
3311   switch (UseOpc) {
3312   default: return false;
3313   case ARM::SUBrr:
3314   case ARM::ADDrr:
3315   case ARM::ORRrr:
3316   case ARM::EORrr:
3317   case ARM::t2SUBrr:
3318   case ARM::t2ADDrr:
3319   case ARM::t2ORRrr:
3320   case ARM::t2EORrr: {
3321     Commute = UseMI.getOperand(2).getReg() != Reg;
3322     switch (UseOpc) {
3323     default: break;
3324     case ARM::ADDrr:
3325     case ARM::SUBrr:
3326       if (UseOpc == ARM::SUBrr && Commute)
3327         return false;
3328 
3329       // ADD/SUB are special because they're essentially the same operation, so
3330       // we can handle a larger range of immediates.
3331       if (ARM_AM::isSOImmTwoPartVal(ImmVal))
3332         NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3333       else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) {
3334         ImmVal = -ImmVal;
3335         NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3336       } else
3337         return false;
3338       SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3339       SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3340       break;
3341     case ARM::ORRrr:
3342     case ARM::EORrr:
3343       if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
3344         return false;
3345       SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3346       SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3347       switch (UseOpc) {
3348       default: break;
3349       case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
3350       case ARM::EORrr: NewUseOpc = ARM::EORri; break;
3351       }
3352       break;
3353     case ARM::t2ADDrr:
3354     case ARM::t2SUBrr: {
3355       if (UseOpc == ARM::t2SUBrr && Commute)
3356         return false;
3357 
3358       // ADD/SUB are special because they're essentially the same operation, so
3359       // we can handle a larger range of immediates.
3360       const bool ToSP = DefMI.getOperand(0).getReg() == ARM::SP;
3361       const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3362       const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3363       if (ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3364         NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3365       else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) {
3366         ImmVal = -ImmVal;
3367         NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3368       } else
3369         return false;
3370       SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3371       SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3372       break;
3373     }
3374     case ARM::t2ORRrr:
3375     case ARM::t2EORrr:
3376       if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3377         return false;
3378       SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3379       SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3380       switch (UseOpc) {
3381       default: break;
3382       case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
3383       case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
3384       }
3385       break;
3386     }
3387   }
3388   }
3389 
3390   unsigned OpIdx = Commute ? 2 : 1;
3391   Register Reg1 = UseMI.getOperand(OpIdx).getReg();
3392   bool isKill = UseMI.getOperand(OpIdx).isKill();
3393   const TargetRegisterClass *TRC = MRI->getRegClass(Reg);
3394   Register NewReg = MRI->createVirtualRegister(TRC);
3395   BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc),
3396           NewReg)
3397       .addReg(Reg1, getKillRegState(isKill))
3398       .addImm(SOImmValV1)
3399       .add(predOps(ARMCC::AL))
3400       .add(condCodeOp());
3401   UseMI.setDesc(get(NewUseOpc));
3402   UseMI.getOperand(1).setReg(NewReg);
3403   UseMI.getOperand(1).setIsKill();
3404   UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3405   DefMI.eraseFromParent();
3406   // FIXME: t2ADDrr should be split, as different rules apply when writing to SP,
3407   // just as t2ADDri was split into [t2ADDri, t2ADDspImm].
3408   // Then the code below will not be needed, as the input/output register
3409   // classes will be rgpr or gprSP.
3410   // For now, we fix the UseMI operand explicitly here:
3411   switch (NewUseOpc) {
3412     case ARM::t2ADDspImm:
3413     case ARM::t2SUBspImm:
3414     case ARM::t2ADDri:
3415     case ARM::t2SUBri:
3416       MRI->constrainRegClass(UseMI.getOperand(0).getReg(), TRC);
3417   }
3418   return true;
3419 }
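
// Illustrative example (hypothetical values): with
//   %1 = MOVi32imm 0x00AB00CD
//   %2 = ADDrr %0, %1
// the constant splits into two ARM SO-immediates (0x00AB0000 and 0x000000CD),
// so the pair is rewritten as two ADDri instructions using a fresh virtual
// register for the intermediate result, and the MOVi32imm is deleted.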
3420 
3421 static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
3422                                         const MachineInstr &MI) {
3423   switch (MI.getOpcode()) {
3424   default: {
3425     const MCInstrDesc &Desc = MI.getDesc();
3426     int UOps = ItinData->getNumMicroOps(Desc.getSchedClass());
3427     assert(UOps >= 0 && "bad # UOps");
3428     return UOps;
3429   }
3430 
3431   case ARM::LDRrs:
3432   case ARM::LDRBrs:
3433   case ARM::STRrs:
3434   case ARM::STRBrs: {
3435     unsigned ShOpVal = MI.getOperand(3).getImm();
3436     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3437     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3438     if (!isSub &&
3439         (ShImm == 0 ||
3440          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3441           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3442       return 1;
3443     return 2;
3444   }
3445 
3446   case ARM::LDRH:
3447   case ARM::STRH: {
3448     if (!MI.getOperand(2).getReg())
3449       return 1;
3450 
3451     unsigned ShOpVal = MI.getOperand(3).getImm();
3452     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3453     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3454     if (!isSub &&
3455         (ShImm == 0 ||
3456          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3457           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3458       return 1;
3459     return 2;
3460   }
3461 
3462   case ARM::LDRSB:
3463   case ARM::LDRSH:
3464     return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2;
3465 
3466   case ARM::LDRSB_POST:
3467   case ARM::LDRSH_POST: {
3468     Register Rt = MI.getOperand(0).getReg();
3469     Register Rm = MI.getOperand(3).getReg();
3470     return (Rt == Rm) ? 4 : 3;
3471   }
3472 
3473   case ARM::LDR_PRE_REG:
3474   case ARM::LDRB_PRE_REG: {
3475     Register Rt = MI.getOperand(0).getReg();
3476     Register Rm = MI.getOperand(3).getReg();
3477     if (Rt == Rm)
3478       return 3;
3479     unsigned ShOpVal = MI.getOperand(4).getImm();
3480     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3481     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3482     if (!isSub &&
3483         (ShImm == 0 ||
3484          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3485           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3486       return 2;
3487     return 3;
3488   }
3489 
3490   case ARM::STR_PRE_REG:
3491   case ARM::STRB_PRE_REG: {
3492     unsigned ShOpVal = MI.getOperand(4).getImm();
3493     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3494     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3495     if (!isSub &&
3496         (ShImm == 0 ||
3497          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3498           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3499       return 2;
3500     return 3;
3501   }
3502 
3503   case ARM::LDRH_PRE:
3504   case ARM::STRH_PRE: {
3505     Register Rt = MI.getOperand(0).getReg();
3506     Register Rm = MI.getOperand(3).getReg();
3507     if (!Rm)
3508       return 2;
3509     if (Rt == Rm)
3510       return 3;
3511     return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 3 : 2;
3512   }
3513 
3514   case ARM::LDR_POST_REG:
3515   case ARM::LDRB_POST_REG:
3516   case ARM::LDRH_POST: {
3517     Register Rt = MI.getOperand(0).getReg();
3518     Register Rm = MI.getOperand(3).getReg();
3519     return (Rt == Rm) ? 3 : 2;
3520   }
3521 
3522   case ARM::LDR_PRE_IMM:
3523   case ARM::LDRB_PRE_IMM:
3524   case ARM::LDR_POST_IMM:
3525   case ARM::LDRB_POST_IMM:
3526   case ARM::STRB_POST_IMM:
3527   case ARM::STRB_POST_REG:
3528   case ARM::STRB_PRE_IMM:
3529   case ARM::STRH_POST:
3530   case ARM::STR_POST_IMM:
3531   case ARM::STR_POST_REG:
3532   case ARM::STR_PRE_IMM:
3533     return 2;
3534 
3535   case ARM::LDRSB_PRE:
3536   case ARM::LDRSH_PRE: {
3537     Register Rm = MI.getOperand(3).getReg();
3538     if (Rm == 0)
3539       return 3;
3540     Register Rt = MI.getOperand(0).getReg();
3541     if (Rt == Rm)
3542       return 4;
3543     unsigned ShOpVal = MI.getOperand(4).getImm();
3544     bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3545     unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3546     if (!isSub &&
3547         (ShImm == 0 ||
3548          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3549           ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3550       return 3;
3551     return 4;
3552   }
3553 
3554   case ARM::LDRD: {
3555     Register Rt = MI.getOperand(0).getReg();
3556     Register Rn = MI.getOperand(2).getReg();
3557     Register Rm = MI.getOperand(3).getReg();
3558     if (Rm)
3559       return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
3560                                                                           : 3;
3561     return (Rt == Rn) ? 3 : 2;
3562   }
3563 
3564   case ARM::STRD: {
3565     Register Rm = MI.getOperand(3).getReg();
3566     if (Rm)
3567       return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
3568                                                                           : 3;
3569     return 2;
3570   }
3571 
3572   case ARM::LDRD_POST:
3573   case ARM::t2LDRD_POST:
3574     return 3;
3575 
3576   case ARM::STRD_POST:
3577   case ARM::t2STRD_POST:
3578     return 4;
3579 
3580   case ARM::LDRD_PRE: {
3581     Register Rt = MI.getOperand(0).getReg();
3582     Register Rn = MI.getOperand(3).getReg();
3583     Register Rm = MI.getOperand(4).getReg();
3584     if (Rm)
3585       return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
3586                                                                           : 4;
3587     return (Rt == Rn) ? 4 : 3;
3588   }
3589 
3590   case ARM::t2LDRD_PRE: {
3591     Register Rt = MI.getOperand(0).getReg();
3592     Register Rn = MI.getOperand(3).getReg();
3593     return (Rt == Rn) ? 4 : 3;
3594   }
3595 
3596   case ARM::STRD_PRE: {
3597     Register Rm = MI.getOperand(4).getReg();
3598     if (Rm)
3599       return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
3600                                                                           : 4;
3601     return 3;
3602   }
3603 
3604   case ARM::t2STRD_PRE:
3605     return 3;
3606 
3607   case ARM::t2LDR_POST:
3608   case ARM::t2LDRB_POST:
3609   case ARM::t2LDRB_PRE:
3610   case ARM::t2LDRSBi12:
3611   case ARM::t2LDRSBi8:
3612   case ARM::t2LDRSBpci:
3613   case ARM::t2LDRSBs:
3614   case ARM::t2LDRH_POST:
3615   case ARM::t2LDRH_PRE:
3616   case ARM::t2LDRSBT:
3617   case ARM::t2LDRSB_POST:
3618   case ARM::t2LDRSB_PRE:
3619   case ARM::t2LDRSH_POST:
3620   case ARM::t2LDRSH_PRE:
3621   case ARM::t2LDRSHi12:
3622   case ARM::t2LDRSHi8:
3623   case ARM::t2LDRSHpci:
3624   case ARM::t2LDRSHs:
3625     return 2;
3626 
3627   case ARM::t2LDRDi8: {
3628     Register Rt = MI.getOperand(0).getReg();
3629     Register Rn = MI.getOperand(2).getReg();
3630     return (Rt == Rn) ? 3 : 2;
3631   }
3632 
3633   case ARM::t2STRB_POST:
3634   case ARM::t2STRB_PRE:
3635   case ARM::t2STRBs:
3636   case ARM::t2STRDi8:
3637   case ARM::t2STRH_POST:
3638   case ARM::t2STRH_PRE:
3639   case ARM::t2STRHs:
3640   case ARM::t2STR_POST:
3641   case ARM::t2STR_PRE:
3642   case ARM::t2STRs:
3643     return 2;
3644   }
3645 }
3646 
3647 // Return the number of 32-bit words loaded by LDM or stored by STM. If this
3648 // can't be easily determined, return 0 (missing MachineMemOperand).
3649 //
3650 // FIXME: The current MachineInstr design does not support relying on machine
3651 // mem operands to determine the width of a memory access. Instead, we expect
3652 // the target to provide this information based on the instruction opcode and
3653 // operands. However, using MachineMemOperand is the best solution now for
3654 // two reasons:
3655 //
3656 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI
3657 // operands. This is much more dangerous than using the MachineMemOperand
3658 // sizes because CodeGen passes can insert/remove optional machine operands. In
3659 // fact, it's totally incorrect for preRA passes and appears to be wrong for
3660 // postRA passes as well.
3661 //
3662 // 2) getNumLDMAddresses is only used by the scheduling machine model and any
3663 // machine model that calls this should handle the unknown (zero size) case.
3664 //
3665 // Long term, we should require a target hook that verifies MachineMemOperand
3666 // sizes during MC lowering. That target hook should be local to MC lowering
3667 // because we can't ensure that it is aware of other MI forms. Doing this will
3668 // ensure that MachineMemOperands are correctly propagated through all passes.
3669 unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const {
3670   unsigned Size = 0;
3671   for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
3672                                   E = MI.memoperands_end();
3673        I != E; ++I) {
3674     Size += (*I)->getSize();
3675   }
3676   // FIXME: The scheduler currently can't handle values larger than 16. But
3677   // the values can actually go up to 32 for floating-point load/store
3678   // multiple (VLDMIA etc.). Also, the way this code is reasoning about memory
3679   // operations isn't right; we could end up with "extra" memory operands for
3680   // various reasons, like tail merge merging two memory operations.
3681   return std::min(Size / 4, 16U);
3682 }
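
// Illustrative example: an LDM with two 8-byte machine memory operands gives
// Size = 16, i.e. 16 / 4 = 4 addresses; anything larger than 64 bytes is
// clamped to the scheduler's current limit of 16.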
3683 
3684 static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc,
3685                                                     unsigned NumRegs) {
3686   unsigned UOps = 1 + NumRegs; // 1 for address computation.
3687   switch (Opc) {
3688   default:
3689     break;
3690   case ARM::VLDMDIA_UPD:
3691   case ARM::VLDMDDB_UPD:
3692   case ARM::VLDMSIA_UPD:
3693   case ARM::VLDMSDB_UPD:
3694   case ARM::VSTMDIA_UPD:
3695   case ARM::VSTMDDB_UPD:
3696   case ARM::VSTMSIA_UPD:
3697   case ARM::VSTMSDB_UPD:
3698   case ARM::LDMIA_UPD:
3699   case ARM::LDMDA_UPD:
3700   case ARM::LDMDB_UPD:
3701   case ARM::LDMIB_UPD:
3702   case ARM::STMIA_UPD:
3703   case ARM::STMDA_UPD:
3704   case ARM::STMDB_UPD:
3705   case ARM::STMIB_UPD:
3706   case ARM::tLDMIA_UPD:
3707   case ARM::tSTMIA_UPD:
3708   case ARM::t2LDMIA_UPD:
3709   case ARM::t2LDMDB_UPD:
3710   case ARM::t2STMIA_UPD:
3711   case ARM::t2STMDB_UPD:
3712     ++UOps; // One for base register writeback.
3713     break;
3714   case ARM::LDMIA_RET:
3715   case ARM::tPOP_RET:
3716   case ARM::t2LDMIA_RET:
3717     UOps += 2; // One for base reg wb, one for write to pc.
3718     break;
3719   }
3720   return UOps;
3721 }
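
// Illustrative example: an LDMIA_UPD of 4 registers is 1 (address) + 4 + 1
// (writeback) = 6 uops, while a tPOP_RET of 4 registers is 1 + 4 + 2 = 7
// (writeback plus the write to pc).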
3722 
3723 unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
3724                                           const MachineInstr &MI) const {
3725   if (!ItinData || ItinData->isEmpty())
3726     return 1;
3727 
3728   const MCInstrDesc &Desc = MI.getDesc();
3729   unsigned Class = Desc.getSchedClass();
3730   int ItinUOps = ItinData->getNumMicroOps(Class);
3731   if (ItinUOps >= 0) {
3732     if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
3733       return getNumMicroOpsSwiftLdSt(ItinData, MI);
3734 
3735     return ItinUOps;
3736   }
3737 
3738   unsigned Opc = MI.getOpcode();
3739   switch (Opc) {
3740   default:
3741     llvm_unreachable("Unexpected multi-uops instruction!");
3742   case ARM::VLDMQIA:
3743   case ARM::VSTMQIA:
3744     return 2;
3745 
3746   // The number of uOps for load / store multiple is determined by the number
3747   // of registers.
3748   //
3749   // On Cortex-A8, each pair of register loads / stores can be scheduled on the
3750   // same cycle. The scheduling for the first load / store must be done
3751   // separately by assuming the address is not 64-bit aligned.
3752   //
3753   // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
3754   // is not 64-bit aligned, then AGU would take an extra cycle.  For VFP / NEON
3755   // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
3756   case ARM::VLDMDIA:
3757   case ARM::VLDMDIA_UPD:
3758   case ARM::VLDMDDB_UPD:
3759   case ARM::VLDMSIA:
3760   case ARM::VLDMSIA_UPD:
3761   case ARM::VLDMSDB_UPD:
3762   case ARM::VSTMDIA:
3763   case ARM::VSTMDIA_UPD:
3764   case ARM::VSTMDDB_UPD:
3765   case ARM::VSTMSIA:
3766   case ARM::VSTMSIA_UPD:
3767   case ARM::VSTMSDB_UPD: {
3768     unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
3769     return (NumRegs / 2) + (NumRegs % 2) + 1;
3770   }
3771 
3772   case ARM::LDMIA_RET:
3773   case ARM::LDMIA:
3774   case ARM::LDMDA:
3775   case ARM::LDMDB:
3776   case ARM::LDMIB:
3777   case ARM::LDMIA_UPD:
3778   case ARM::LDMDA_UPD:
3779   case ARM::LDMDB_UPD:
3780   case ARM::LDMIB_UPD:
3781   case ARM::STMIA:
3782   case ARM::STMDA:
3783   case ARM::STMDB:
3784   case ARM::STMIB:
3785   case ARM::STMIA_UPD:
3786   case ARM::STMDA_UPD:
3787   case ARM::STMDB_UPD:
3788   case ARM::STMIB_UPD:
3789   case ARM::tLDMIA:
3790   case ARM::tLDMIA_UPD:
3791   case ARM::tSTMIA_UPD:
3792   case ARM::tPOP_RET:
3793   case ARM::tPOP:
3794   case ARM::tPUSH:
3795   case ARM::t2LDMIA_RET:
3796   case ARM::t2LDMIA:
3797   case ARM::t2LDMDB:
3798   case ARM::t2LDMIA_UPD:
3799   case ARM::t2LDMDB_UPD:
3800   case ARM::t2STMIA:
3801   case ARM::t2STMDB:
3802   case ARM::t2STMIA_UPD:
3803   case ARM::t2STMDB_UPD: {
3804     unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;
3805     switch (Subtarget.getLdStMultipleTiming()) {
3806     case ARMSubtarget::SingleIssuePlusExtras:
3807       return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs);
3808     case ARMSubtarget::SingleIssue:
3809       // Assume the worst.
3810       return NumRegs;
3811     case ARMSubtarget::DoubleIssue: {
3812       if (NumRegs < 4)
3813         return 2;
3814       // 4 registers would be issued: 2, 2.
3815       // 5 registers would be issued: 2, 2, 1.
3816       unsigned UOps = (NumRegs / 2);
3817       if (NumRegs % 2)
3818         ++UOps;
3819       return UOps;
3820     }
3821     case ARMSubtarget::DoubleIssueCheckUnalignedAccess: {
3822       unsigned UOps = (NumRegs / 2);
3823       // If there is an odd number of registers or the access is not 64-bit
3824       // aligned, then it takes an extra AGU (Address Generation Unit) cycle.
3825       if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
3826           (*MI.memoperands_begin())->getAlign() < Align(8))
3827         ++UOps;
3828       return UOps;
3829       }
3830     }
3831   }
3832   }
3833   llvm_unreachable("Didn't find the number of microops");
3834 }
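
// Illustrative example: for an LDMIA of 5 registers, DoubleIssue gives
// 5 / 2 + 1 = 3 uops (issued as 2, 2, 1); DoubleIssueCheckUnalignedAccess also
// gives 3 here, since the odd register count costs one extra AGU cycle.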
3835 
3836 int
3837 ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
3838                                   const MCInstrDesc &DefMCID,
3839                                   unsigned DefClass,
3840                                   unsigned DefIdx, unsigned DefAlign) const {
3841   int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3842   if (RegNo <= 0)
3843     // Def is the address writeback.
3844     return ItinData->getOperandCycle(DefClass, DefIdx);
3845 
3846   int DefCycle;
3847   if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3848     // (regno / 2) + (regno % 2) + 1
3849     DefCycle = RegNo / 2 + 1;
3850     if (RegNo % 2)
3851       ++DefCycle;
3852   } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3853     DefCycle = RegNo;
3854     bool isSLoad = false;
3855 
3856     switch (DefMCID.getOpcode()) {
3857     default: break;
3858     case ARM::VLDMSIA:
3859     case ARM::VLDMSIA_UPD:
3860     case ARM::VLDMSDB_UPD:
3861       isSLoad = true;
3862       break;
3863     }
3864 
3865     // If there is an odd number of 'S' registers or the access is not
3866     // 64-bit aligned, then it takes an extra cycle.
3867     if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3868       ++DefCycle;
3869   } else {
3870     // Assume the worst.
3871     DefCycle = RegNo + 2;
3872   }
3873 
3874   return DefCycle;
3875 }
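
// Illustrative example: for a VLDM def operand with RegNo = 3 on Cortex-A8/A7,
// DefCycle = 3 / 2 + 1 = 2, plus one because RegNo is odd, giving 3; on an
// unknown core the conservative answer is RegNo + 2 = 5.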
3876 
3877 int
3878 ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
3879                                  const MCInstrDesc &DefMCID,
3880                                  unsigned DefClass,
3881                                  unsigned DefIdx, unsigned DefAlign) const {
3882   int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3883   if (RegNo <= 0)
3884     // Def is the address writeback.
3885     return ItinData->getOperandCycle(DefClass, DefIdx);
3886 
3887   int DefCycle;
3888   if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3889     // 4 registers would be issued: 1, 2, 1.
3890     // 5 registers would be issued: 1, 2, 2.
3891     DefCycle = RegNo / 2;
3892     if (DefCycle < 1)
3893       DefCycle = 1;
3894     // Result latency is issue cycle + 2: E2.
3895     DefCycle += 2;
3896   } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3897     DefCycle = (RegNo / 2);
3898     // If there is an odd number of registers or the access is not 64-bit
3899     // aligned, then it takes an extra AGU (Address Generation Unit) cycle.
3900     if ((RegNo % 2) || DefAlign < 8)
3901       ++DefCycle;
3902     // Result latency is AGU cycles + 2.
3903     DefCycle += 2;
3904   } else {
3905     // Assume the worst.
3906     DefCycle = RegNo + 2;
3907   }
3908 
3909   return DefCycle;
3910 }
3911 
3912 int
3913 ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
3914                                   const MCInstrDesc &UseMCID,
3915                                   unsigned UseClass,
3916                                   unsigned UseIdx, unsigned UseAlign) const {
3917   int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3918   if (RegNo <= 0)
3919     return ItinData->getOperandCycle(UseClass, UseIdx);
3920 
3921   int UseCycle;
3922   if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3923     // (regno / 2) + (regno % 2) + 1
3924     UseCycle = RegNo / 2 + 1;
3925     if (RegNo % 2)
3926       ++UseCycle;
3927   } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3928     UseCycle = RegNo;
3929     bool isSStore = false;
3930 
3931     switch (UseMCID.getOpcode()) {
3932     default: break;
3933     case ARM::VSTMSIA:
3934     case ARM::VSTMSIA_UPD:
3935     case ARM::VSTMSDB_UPD:
3936       isSStore = true;
3937       break;
3938     }
3939 
3940     // If there is an odd number of 'S' registers or the access is not
3941     // 64-bit aligned, then it takes an extra cycle.
3942     if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3943       ++UseCycle;
3944   } else {
3945     // Assume the worst.
3946     UseCycle = RegNo + 2;
3947   }
3948 
3949   return UseCycle;
3950 }
3951 
3952 int
3953 ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
3954                                  const MCInstrDesc &UseMCID,
3955                                  unsigned UseClass,
3956                                  unsigned UseIdx, unsigned UseAlign) const {
3957   int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3958   if (RegNo <= 0)
3959     return ItinData->getOperandCycle(UseClass, UseIdx);
3960 
3961   int UseCycle;
3962   if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3963     UseCycle = RegNo / 2;
3964     if (UseCycle < 2)
3965       UseCycle = 2;
3966     // Read in E3.
3967     UseCycle += 2;
3968   } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3969     UseCycle = (RegNo / 2);
3970     // If there is an odd number of registers or the access is not 64-bit
3971     // aligned, then it takes an extra AGU (Address Generation Unit) cycle.
3972     if ((RegNo % 2) || UseAlign < 8)
3973       ++UseCycle;
3974   } else {
3975     // Assume the worst.
3976     UseCycle = 1;
3977   }
3978   return UseCycle;
3979 }
3980 
3981 int
3982 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
3983                                     const MCInstrDesc &DefMCID,
3984                                     unsigned DefIdx, unsigned DefAlign,
3985                                     const MCInstrDesc &UseMCID,
3986                                     unsigned UseIdx, unsigned UseAlign) const {
3987   unsigned DefClass = DefMCID.getSchedClass();
3988   unsigned UseClass = UseMCID.getSchedClass();
3989 
3990   if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
3991     return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
3992 
3993   // This may be a def / use of a variable_ops instruction, the operand
3994   // latency might be determinable dynamically. Let the target try to
3995   // figure it out.
3996   int DefCycle = -1;
3997   bool LdmBypass = false;
3998   switch (DefMCID.getOpcode()) {
3999   default:
4000     DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
4001     break;
4002 
4003   case ARM::VLDMDIA:
4004   case ARM::VLDMDIA_UPD:
4005   case ARM::VLDMDDB_UPD:
4006   case ARM::VLDMSIA:
4007   case ARM::VLDMSIA_UPD:
4008   case ARM::VLDMSDB_UPD:
4009     DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4010     break;
4011 
4012   case ARM::LDMIA_RET:
4013   case ARM::LDMIA:
4014   case ARM::LDMDA:
4015   case ARM::LDMDB:
4016   case ARM::LDMIB:
4017   case ARM::LDMIA_UPD:
4018   case ARM::LDMDA_UPD:
4019   case ARM::LDMDB_UPD:
4020   case ARM::LDMIB_UPD:
4021   case ARM::tLDMIA:
4022   case ARM::tLDMIA_UPD:
4023   case ARM::tPUSH:
4024   case ARM::t2LDMIA_RET:
4025   case ARM::t2LDMIA:
4026   case ARM::t2LDMDB:
4027   case ARM::t2LDMIA_UPD:
4028   case ARM::t2LDMDB_UPD:
4029     LdmBypass = true;
4030     DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4031     break;
4032   }
4033 
4034   if (DefCycle == -1)
4035     // We can't seem to determine the result latency of the def, assume it's 2.
4036     DefCycle = 2;
4037 
4038   int UseCycle = -1;
4039   switch (UseMCID.getOpcode()) {
4040   default:
4041     UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
4042     break;
4043 
4044   case ARM::VSTMDIA:
4045   case ARM::VSTMDIA_UPD:
4046   case ARM::VSTMDDB_UPD:
4047   case ARM::VSTMSIA:
4048   case ARM::VSTMSIA_UPD:
4049   case ARM::VSTMSDB_UPD:
4050     UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4051     break;
4052 
4053   case ARM::STMIA:
4054   case ARM::STMDA:
4055   case ARM::STMDB:
4056   case ARM::STMIB:
4057   case ARM::STMIA_UPD:
4058   case ARM::STMDA_UPD:
4059   case ARM::STMDB_UPD:
4060   case ARM::STMIB_UPD:
4061   case ARM::tSTMIA_UPD:
4062   case ARM::tPOP_RET:
4063   case ARM::tPOP:
4064   case ARM::t2STMIA:
4065   case ARM::t2STMDB:
4066   case ARM::t2STMIA_UPD:
4067   case ARM::t2STMDB_UPD:
4068     UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4069     break;
4070   }
4071 
4072   if (UseCycle == -1)
4073     // Assume it's read in the first stage.
4074     UseCycle = 1;
4075 
4076   UseCycle = DefCycle - UseCycle + 1;
4077   if (UseCycle > 0) {
4078     if (LdmBypass) {
4079       // It's a variable_ops instruction so we can't use DefIdx here. Just use
4080       // the first def operand.
4081       if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
4082                                           UseClass, UseIdx))
4083         --UseCycle;
4084     } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
4085                                                UseClass, UseIdx)) {
4086       --UseCycle;
4087     }
4088   }
4089 
4090   return UseCycle;
4091 }
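
// Illustrative example: with DefCycle = 3 and UseCycle = 1 the operand latency
// is 3 - 1 + 1 = 3, reduced by one more if the itinerary reports pipeline
// forwarding between the def and use classes.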
4092 
4093 static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
4094                                            const MachineInstr *MI, unsigned Reg,
4095                                            unsigned &DefIdx, unsigned &Dist) {
4096   Dist = 0;
4097 
4098   MachineBasicBlock::const_iterator I = MI; ++I;
4099   MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator());
4100   assert(II->isInsideBundle() && "Empty bundle?");
4101 
4102   int Idx = -1;
4103   while (II->isInsideBundle()) {
4104     Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
4105     if (Idx != -1)
4106       break;
4107     --II;
4108     ++Dist;
4109   }
4110 
4111   assert(Idx != -1 && "Cannot find bundled definition!");
4112   DefIdx = Idx;
4113   return &*II;
4114 }
4115 
4116 static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
4117                                            const MachineInstr &MI, unsigned Reg,
4118                                            unsigned &UseIdx, unsigned &Dist) {
4119   Dist = 0;
4120 
4121   MachineBasicBlock::const_instr_iterator II = ++MI.getIterator();
4122   assert(II->isInsideBundle() && "Empty bundle?");
4123   MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
4124 
4125   // FIXME: This doesn't properly handle multiple uses.
4126   int Idx = -1;
4127   while (II != E && II->isInsideBundle()) {
4128     Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
4129     if (Idx != -1)
4130       break;
4131     if (II->getOpcode() != ARM::t2IT)
4132       ++Dist;
4133     ++II;
4134   }
4135 
4136   if (Idx == -1) {
4137     Dist = 0;
4138     return nullptr;
4139   }
4140 
4141   UseIdx = Idx;
4142   return &*II;
4143 }
4144 
4145 /// Return the number of cycles to add to (or subtract from) the static
4146 /// itinerary based on the def opcode and alignment. The caller will ensure that
4147 /// adjusted latency is at least one cycle.
4148 static int adjustDefLatency(const ARMSubtarget &Subtarget,
4149                             const MachineInstr &DefMI,
4150                             const MCInstrDesc &DefMCID, unsigned DefAlign) {
4151   int Adjust = 0;
4152   if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) {
4153     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
4154     // variants are one cycle cheaper.
4155     switch (DefMCID.getOpcode()) {
4156     default: break;
4157     case ARM::LDRrs:
4158     case ARM::LDRBrs: {
4159       unsigned ShOpVal = DefMI.getOperand(3).getImm();
4160       unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4161       if (ShImm == 0 ||
4162           (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4163         --Adjust;
4164       break;
4165     }
4166     case ARM::t2LDRs:
4167     case ARM::t2LDRBs:
4168     case ARM::t2LDRHs:
4169     case ARM::t2LDRSHs: {
4170       // Thumb2 mode: lsl only.
4171       unsigned ShAmt = DefMI.getOperand(3).getImm();
4172       if (ShAmt == 0 || ShAmt == 2)
4173         --Adjust;
4174       break;
4175     }
4176     }
4177   } else if (Subtarget.isSwift()) {
4178     // FIXME: Properly handle all of the latency adjustments for address
4179     // writeback.
4180     switch (DefMCID.getOpcode()) {
4181     default: break;
4182     case ARM::LDRrs:
4183     case ARM::LDRBrs: {
4184       unsigned ShOpVal = DefMI.getOperand(3).getImm();
4185       bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
4186       unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4187       if (!isSub &&
4188           (ShImm == 0 ||
4189            ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4190             ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
4191         Adjust -= 2;
4192       else if (!isSub &&
4193                ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
4194         --Adjust;
4195       break;
4196     }
4197     case ARM::t2LDRs:
4198     case ARM::t2LDRBs:
4199     case ARM::t2LDRHs:
4200     case ARM::t2LDRSHs: {
4201       // Thumb2 mode: lsl only.
4202       unsigned ShAmt = DefMI.getOperand(3).getImm();
4203       if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4204         Adjust -= 2;
4205       break;
4206     }
4207     }
4208   }
4209 
4210   if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4211     switch (DefMCID.getOpcode()) {
4212     default: break;
4213     case ARM::VLD1q8:
4214     case ARM::VLD1q16:
4215     case ARM::VLD1q32:
4216     case ARM::VLD1q64:
4217     case ARM::VLD1q8wb_fixed:
4218     case ARM::VLD1q16wb_fixed:
4219     case ARM::VLD1q32wb_fixed:
4220     case ARM::VLD1q64wb_fixed:
4221     case ARM::VLD1q8wb_register:
4222     case ARM::VLD1q16wb_register:
4223     case ARM::VLD1q32wb_register:
4224     case ARM::VLD1q64wb_register:
4225     case ARM::VLD2d8:
4226     case ARM::VLD2d16:
4227     case ARM::VLD2d32:
4228     case ARM::VLD2q8:
4229     case ARM::VLD2q16:
4230     case ARM::VLD2q32:
4231     case ARM::VLD2d8wb_fixed:
4232     case ARM::VLD2d16wb_fixed:
4233     case ARM::VLD2d32wb_fixed:
4234     case ARM::VLD2q8wb_fixed:
4235     case ARM::VLD2q16wb_fixed:
4236     case ARM::VLD2q32wb_fixed:
4237     case ARM::VLD2d8wb_register:
4238     case ARM::VLD2d16wb_register:
4239     case ARM::VLD2d32wb_register:
4240     case ARM::VLD2q8wb_register:
4241     case ARM::VLD2q16wb_register:
4242     case ARM::VLD2q32wb_register:
4243     case ARM::VLD3d8:
4244     case ARM::VLD3d16:
4245     case ARM::VLD3d32:
4246     case ARM::VLD1d64T:
4247     case ARM::VLD3d8_UPD:
4248     case ARM::VLD3d16_UPD:
4249     case ARM::VLD3d32_UPD:
4250     case ARM::VLD1d64Twb_fixed:
4251     case ARM::VLD1d64Twb_register:
4252     case ARM::VLD3q8_UPD:
4253     case ARM::VLD3q16_UPD:
4254     case ARM::VLD3q32_UPD:
4255     case ARM::VLD4d8:
4256     case ARM::VLD4d16:
4257     case ARM::VLD4d32:
4258     case ARM::VLD1d64Q:
4259     case ARM::VLD4d8_UPD:
4260     case ARM::VLD4d16_UPD:
4261     case ARM::VLD4d32_UPD:
4262     case ARM::VLD1d64Qwb_fixed:
4263     case ARM::VLD1d64Qwb_register:
4264     case ARM::VLD4q8_UPD:
4265     case ARM::VLD4q16_UPD:
4266     case ARM::VLD4q32_UPD:
4267     case ARM::VLD1DUPq8:
4268     case ARM::VLD1DUPq16:
4269     case ARM::VLD1DUPq32:
4270     case ARM::VLD1DUPq8wb_fixed:
4271     case ARM::VLD1DUPq16wb_fixed:
4272     case ARM::VLD1DUPq32wb_fixed:
4273     case ARM::VLD1DUPq8wb_register:
4274     case ARM::VLD1DUPq16wb_register:
4275     case ARM::VLD1DUPq32wb_register:
4276     case ARM::VLD2DUPd8:
4277     case ARM::VLD2DUPd16:
4278     case ARM::VLD2DUPd32:
4279     case ARM::VLD2DUPd8wb_fixed:
4280     case ARM::VLD2DUPd16wb_fixed:
4281     case ARM::VLD2DUPd32wb_fixed:
4282     case ARM::VLD2DUPd8wb_register:
4283     case ARM::VLD2DUPd16wb_register:
4284     case ARM::VLD2DUPd32wb_register:
4285     case ARM::VLD4DUPd8:
4286     case ARM::VLD4DUPd16:
4287     case ARM::VLD4DUPd32:
4288     case ARM::VLD4DUPd8_UPD:
4289     case ARM::VLD4DUPd16_UPD:
4290     case ARM::VLD4DUPd32_UPD:
4291     case ARM::VLD1LNd8:
4292     case ARM::VLD1LNd16:
4293     case ARM::VLD1LNd32:
4294     case ARM::VLD1LNd8_UPD:
4295     case ARM::VLD1LNd16_UPD:
4296     case ARM::VLD1LNd32_UPD:
4297     case ARM::VLD2LNd8:
4298     case ARM::VLD2LNd16:
4299     case ARM::VLD2LNd32:
4300     case ARM::VLD2LNq16:
4301     case ARM::VLD2LNq32:
4302     case ARM::VLD2LNd8_UPD:
4303     case ARM::VLD2LNd16_UPD:
4304     case ARM::VLD2LNd32_UPD:
4305     case ARM::VLD2LNq16_UPD:
4306     case ARM::VLD2LNq32_UPD:
4307     case ARM::VLD4LNd8:
4308     case ARM::VLD4LNd16:
4309     case ARM::VLD4LNd32:
4310     case ARM::VLD4LNq16:
4311     case ARM::VLD4LNq32:
4312     case ARM::VLD4LNd8_UPD:
4313     case ARM::VLD4LNd16_UPD:
4314     case ARM::VLD4LNd32_UPD:
4315     case ARM::VLD4LNq16_UPD:
4316     case ARM::VLD4LNq32_UPD:
4317       // If the address is not 64-bit aligned, the latency of these
4318       // instructions increases by one.
4319       ++Adjust;
4320       break;
4321     }
4322   }
4323   return Adjust;
4324 }
4325 
4326 int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4327                                         const MachineInstr &DefMI,
4328                                         unsigned DefIdx,
4329                                         const MachineInstr &UseMI,
4330                                         unsigned UseIdx) const {
4331   // No operand latency. The caller may fall back to getInstrLatency.
4332   if (!ItinData || ItinData->isEmpty())
4333     return -1;
4334 
4335   const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4336   Register Reg = DefMO.getReg();
4337 
4338   const MachineInstr *ResolvedDefMI = &DefMI;
4339   unsigned DefAdj = 0;
4340   if (DefMI.isBundle())
4341     ResolvedDefMI =
4342         getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj);
4343   if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() ||
4344       ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) {
4345     return 1;
4346   }
4347 
4348   const MachineInstr *ResolvedUseMI = &UseMI;
4349   unsigned UseAdj = 0;
4350   if (UseMI.isBundle()) {
4351     ResolvedUseMI =
4352         getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj);
4353     if (!ResolvedUseMI)
4354       return -1;
4355   }
4356 
4357   return getOperandLatencyImpl(
4358       ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
4359       Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);
4360 }
4361 
4362 int ARMBaseInstrInfo::getOperandLatencyImpl(
4363     const InstrItineraryData *ItinData, const MachineInstr &DefMI,
4364     unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
4365     const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
4366     unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
4367   if (Reg == ARM::CPSR) {
4368     if (DefMI.getOpcode() == ARM::FMSTAT) {
4369       // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
4370       return Subtarget.isLikeA9() ? 1 : 20;
4371     }
4372 
4373     // CPSR set and branch can be paired in the same cycle.
4374     if (UseMI.isBranch())
4375       return 0;
4376 
4377     // Otherwise it takes the instruction latency (generally one).
4378     unsigned Latency = getInstrLatency(ItinData, DefMI);
4379 
4380     // For Thumb2 and -Os, prefer scheduling a CPSR-setting instruction close to
4381     // its uses. Instructions which are otherwise scheduled between them may
4382     // incur a code size penalty (not able to use the CPSR setting 16-bit
4383     // instructions).
4384     if (Latency > 0 && Subtarget.isThumb2()) {
4385       const MachineFunction *MF = DefMI.getParent()->getParent();
4386       // FIXME: Use Function::hasOptSize().
4387       if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize))
4388         --Latency;
4389     }
4390     return Latency;
4391   }
4392 
4393   if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit())
4394     return -1;
4395 
4396   unsigned DefAlign = DefMI.hasOneMemOperand()
4397                           ? (*DefMI.memoperands_begin())->getAlign().value()
4398                           : 0;
4399   unsigned UseAlign = UseMI.hasOneMemOperand()
4400                           ? (*UseMI.memoperands_begin())->getAlign().value()
4401                           : 0;
4402 
4403   // Get the itinerary's latency if possible, and handle variable_ops.
4404   int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID,
4405                                   UseIdx, UseAlign);
4406   // Unable to find operand latency. The caller may resort to getInstrLatency.
4407   if (Latency < 0)
4408     return Latency;
4409 
4410   // Adjust for IT block position.
4411   int Adj = DefAdj + UseAdj;
4412 
4413   // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4414   Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
4415   if (Adj >= 0 || (int)Latency > -Adj) {
4416     return Latency + Adj;
4417   }
4418   // Return the itinerary latency, which may be zero but not less than zero.
4419   return Latency;
4420 }
4421 
4422 int
4423 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4424                                     SDNode *DefNode, unsigned DefIdx,
4425                                     SDNode *UseNode, unsigned UseIdx) const {
4426   if (!DefNode->isMachineOpcode())
4427     return 1;
4428 
4429   const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());
4430 
4431   if (isZeroCost(DefMCID.Opcode))
4432     return 0;
4433 
4434   if (!ItinData || ItinData->isEmpty())
4435     return DefMCID.mayLoad() ? 3 : 1;
4436 
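  // If the use is not a machine node (e.g. a generic CopyToReg during pre-RA
  // scheduling), apply the subtarget's pre-ISel operand latency adjustment,
  // but never report less than one cycle.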
4437   if (!UseNode->isMachineOpcode()) {
4438     int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
4439     int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4440     int Threshold = 1 + Adj;
4441     return Latency <= Threshold ? 1 : Latency - Adj;
4442   }
4443 
4444   const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
4445   auto *DefMN = cast<MachineSDNode>(DefNode);
4446   unsigned DefAlign = !DefMN->memoperands_empty()
4447                           ? (*DefMN->memoperands_begin())->getAlign().value()
4448                           : 0;
4449   auto *UseMN = cast<MachineSDNode>(UseNode);
4450   unsigned UseAlign = !UseMN->memoperands_empty()
4451                           ? (*UseMN->memoperands_begin())->getAlign().value()
4452                           : 0;
4453   int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
4454                                   UseMCID, UseIdx, UseAlign);
4455 
4456   if (Latency > 1 &&
4457       (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4458        Subtarget.isCortexA7())) {
4459     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
4460     // variants are one cycle cheaper.
4461     switch (DefMCID.getOpcode()) {
4462     default: break;
4463     case ARM::LDRrs:
4464     case ARM::LDRBrs: {
4465       unsigned ShOpVal =
4466         cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4467       unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4468       if (ShImm == 0 ||
4469           (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4470         --Latency;
4471       break;
4472     }
4473     case ARM::t2LDRs:
4474     case ARM::t2LDRBs:
4475     case ARM::t2LDRHs:
4476     case ARM::t2LDRSHs: {
4477       // Thumb2 mode: lsl only.
4478       unsigned ShAmt =
4479         cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4480       if (ShAmt == 0 || ShAmt == 2)
4481         --Latency;
4482       break;
4483     }
4484     }
4485   } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) {
4486     // FIXME: Properly handle all of the latency adjustments for address
4487     // writeback.
4488     switch (DefMCID.getOpcode()) {
4489     default: break;
4490     case ARM::LDRrs:
4491     case ARM::LDRBrs: {
4492       unsigned ShOpVal =
4493         cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4494       unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4495       if (ShImm == 0 ||
4496           ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4497            ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4498         Latency -= 2;
4499       else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
4500         --Latency;
4501       break;
4502     }
4503     case ARM::t2LDRs:
4504     case ARM::t2LDRBs:
4505     case ARM::t2LDRHs:
4506     case ARM::t2LDRSHs:
4507       // Thumb2 mode: lsl 0-3 only.
4508       Latency -= 2;
4509       break;
4510     }
4511   }
4512 
4513   if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4514     switch (DefMCID.getOpcode()) {
4515     default: break;
4516     case ARM::VLD1q8:
4517     case ARM::VLD1q16:
4518     case ARM::VLD1q32:
4519     case ARM::VLD1q64:
4520     case ARM::VLD1q8wb_register:
4521     case ARM::VLD1q16wb_register:
4522     case ARM::VLD1q32wb_register:
4523     case ARM::VLD1q64wb_register:
4524     case ARM::VLD1q8wb_fixed:
4525     case ARM::VLD1q16wb_fixed:
4526     case ARM::VLD1q32wb_fixed:
4527     case ARM::VLD1q64wb_fixed:
4528     case ARM::VLD2d8:
4529     case ARM::VLD2d16:
4530     case ARM::VLD2d32:
4531     case ARM::VLD2q8Pseudo:
4532     case ARM::VLD2q16Pseudo:
4533     case ARM::VLD2q32Pseudo:
4534     case ARM::VLD2d8wb_fixed:
4535     case ARM::VLD2d16wb_fixed:
4536     case ARM::VLD2d32wb_fixed:
4537     case ARM::VLD2q8PseudoWB_fixed:
4538     case ARM::VLD2q16PseudoWB_fixed:
4539     case ARM::VLD2q32PseudoWB_fixed:
4540     case ARM::VLD2d8wb_register:
4541     case ARM::VLD2d16wb_register:
4542     case ARM::VLD2d32wb_register:
4543     case ARM::VLD2q8PseudoWB_register:
4544     case ARM::VLD2q16PseudoWB_register:
4545     case ARM::VLD2q32PseudoWB_register:
4546     case ARM::VLD3d8Pseudo:
4547     case ARM::VLD3d16Pseudo:
4548     case ARM::VLD3d32Pseudo:
4549     case ARM::VLD1d8TPseudo:
4550     case ARM::VLD1d16TPseudo:
4551     case ARM::VLD1d32TPseudo:
4552     case ARM::VLD1d64TPseudo:
4553     case ARM::VLD1d64TPseudoWB_fixed:
4554     case ARM::VLD1d64TPseudoWB_register:
4555     case ARM::VLD3d8Pseudo_UPD:
4556     case ARM::VLD3d16Pseudo_UPD:
4557     case ARM::VLD3d32Pseudo_UPD:
4558     case ARM::VLD3q8Pseudo_UPD:
4559     case ARM::VLD3q16Pseudo_UPD:
4560     case ARM::VLD3q32Pseudo_UPD:
4561     case ARM::VLD3q8oddPseudo:
4562     case ARM::VLD3q16oddPseudo:
4563     case ARM::VLD3q32oddPseudo:
4564     case ARM::VLD3q8oddPseudo_UPD:
4565     case ARM::VLD3q16oddPseudo_UPD:
4566     case ARM::VLD3q32oddPseudo_UPD:
4567     case ARM::VLD4d8Pseudo:
4568     case ARM::VLD4d16Pseudo:
4569     case ARM::VLD4d32Pseudo:
4570     case ARM::VLD1d8QPseudo:
4571     case ARM::VLD1d16QPseudo:
4572     case ARM::VLD1d32QPseudo:
4573     case ARM::VLD1d64QPseudo:
4574     case ARM::VLD1d64QPseudoWB_fixed:
4575     case ARM::VLD1d64QPseudoWB_register:
4576     case ARM::VLD1q8HighQPseudo:
4577     case ARM::VLD1q8LowQPseudo_UPD:
4578     case ARM::VLD1q8HighTPseudo:
4579     case ARM::VLD1q8LowTPseudo_UPD:
4580     case ARM::VLD1q16HighQPseudo:
4581     case ARM::VLD1q16LowQPseudo_UPD:
4582     case ARM::VLD1q16HighTPseudo:
4583     case ARM::VLD1q16LowTPseudo_UPD:
4584     case ARM::VLD1q32HighQPseudo:
4585     case ARM::VLD1q32LowQPseudo_UPD:
4586     case ARM::VLD1q32HighTPseudo:
4587     case ARM::VLD1q32LowTPseudo_UPD:
4588     case ARM::VLD1q64HighQPseudo:
4589     case ARM::VLD1q64LowQPseudo_UPD:
4590     case ARM::VLD1q64HighTPseudo:
4591     case ARM::VLD1q64LowTPseudo_UPD:
4592     case ARM::VLD4d8Pseudo_UPD:
4593     case ARM::VLD4d16Pseudo_UPD:
4594     case ARM::VLD4d32Pseudo_UPD:
4595     case ARM::VLD4q8Pseudo_UPD:
4596     case ARM::VLD4q16Pseudo_UPD:
4597     case ARM::VLD4q32Pseudo_UPD:
4598     case ARM::VLD4q8oddPseudo:
4599     case ARM::VLD4q16oddPseudo:
4600     case ARM::VLD4q32oddPseudo:
4601     case ARM::VLD4q8oddPseudo_UPD:
4602     case ARM::VLD4q16oddPseudo_UPD:
4603     case ARM::VLD4q32oddPseudo_UPD:
4604     case ARM::VLD1DUPq8:
4605     case ARM::VLD1DUPq16:
4606     case ARM::VLD1DUPq32:
4607     case ARM::VLD1DUPq8wb_fixed:
4608     case ARM::VLD1DUPq16wb_fixed:
4609     case ARM::VLD1DUPq32wb_fixed:
4610     case ARM::VLD1DUPq8wb_register:
4611     case ARM::VLD1DUPq16wb_register:
4612     case ARM::VLD1DUPq32wb_register:
4613     case ARM::VLD2DUPd8:
4614     case ARM::VLD2DUPd16:
4615     case ARM::VLD2DUPd32:
4616     case ARM::VLD2DUPd8wb_fixed:
4617     case ARM::VLD2DUPd16wb_fixed:
4618     case ARM::VLD2DUPd32wb_fixed:
4619     case ARM::VLD2DUPd8wb_register:
4620     case ARM::VLD2DUPd16wb_register:
4621     case ARM::VLD2DUPd32wb_register:
4622     case ARM::VLD2DUPq8EvenPseudo:
4623     case ARM::VLD2DUPq8OddPseudo:
4624     case ARM::VLD2DUPq16EvenPseudo:
4625     case ARM::VLD2DUPq16OddPseudo:
4626     case ARM::VLD2DUPq32EvenPseudo:
4627     case ARM::VLD2DUPq32OddPseudo:
4628     case ARM::VLD3DUPq8EvenPseudo:
4629     case ARM::VLD3DUPq8OddPseudo:
4630     case ARM::VLD3DUPq16EvenPseudo:
4631     case ARM::VLD3DUPq16OddPseudo:
4632     case ARM::VLD3DUPq32EvenPseudo:
4633     case ARM::VLD3DUPq32OddPseudo:
4634     case ARM::VLD4DUPd8Pseudo:
4635     case ARM::VLD4DUPd16Pseudo:
4636     case ARM::VLD4DUPd32Pseudo:
4637     case ARM::VLD4DUPd8Pseudo_UPD:
4638     case ARM::VLD4DUPd16Pseudo_UPD:
4639     case ARM::VLD4DUPd32Pseudo_UPD:
4640     case ARM::VLD4DUPq8EvenPseudo:
4641     case ARM::VLD4DUPq8OddPseudo:
4642     case ARM::VLD4DUPq16EvenPseudo:
4643     case ARM::VLD4DUPq16OddPseudo:
4644     case ARM::VLD4DUPq32EvenPseudo:
4645     case ARM::VLD4DUPq32OddPseudo:
4646     case ARM::VLD1LNq8Pseudo:
4647     case ARM::VLD1LNq16Pseudo:
4648     case ARM::VLD1LNq32Pseudo:
4649     case ARM::VLD1LNq8Pseudo_UPD:
4650     case ARM::VLD1LNq16Pseudo_UPD:
4651     case ARM::VLD1LNq32Pseudo_UPD:
4652     case ARM::VLD2LNd8Pseudo:
4653     case ARM::VLD2LNd16Pseudo:
4654     case ARM::VLD2LNd32Pseudo:
4655     case ARM::VLD2LNq16Pseudo:
4656     case ARM::VLD2LNq32Pseudo:
4657     case ARM::VLD2LNd8Pseudo_UPD:
4658     case ARM::VLD2LNd16Pseudo_UPD:
4659     case ARM::VLD2LNd32Pseudo_UPD:
4660     case ARM::VLD2LNq16Pseudo_UPD:
4661     case ARM::VLD2LNq32Pseudo_UPD:
4662     case ARM::VLD4LNd8Pseudo:
4663     case ARM::VLD4LNd16Pseudo:
4664     case ARM::VLD4LNd32Pseudo:
4665     case ARM::VLD4LNq16Pseudo:
4666     case ARM::VLD4LNq32Pseudo:
4667     case ARM::VLD4LNd8Pseudo_UPD:
4668     case ARM::VLD4LNd16Pseudo_UPD:
4669     case ARM::VLD4LNd32Pseudo_UPD:
4670     case ARM::VLD4LNq16Pseudo_UPD:
4671     case ARM::VLD4LNq32Pseudo_UPD:
4672       // If the address is not 64-bit aligned, the latency of these
4673       // instructions increases by one.
4674       ++Latency;
4675       break;
4676     }
4677 
4678   return Latency;
4679 }
4680 
4681 unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const {
4682   if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4683       MI.isImplicitDef())
4684     return 0;
4685 
4686   if (MI.isBundle())
4687     return 0;
4688 
4689   const MCInstrDesc &MCID = MI.getDesc();
4690 
4691   if (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4692                         !Subtarget.cheapPredicableCPSRDef())) {
4693     // When predicated, CPSR is an additional source operand for CPSR updating
4694     // instructions, which apparently increases their latencies.
4695     return 1;
4696   }
4697   return 0;
4698 }
4699 
4700 unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4701                                            const MachineInstr &MI,
4702                                            unsigned *PredCost) const {
4703   if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4704       MI.isImplicitDef())
4705     return 1;
4706 
4707   // An instruction scheduler typically runs on unbundled instructions; however,
4708   // other passes may query the latency of a bundled instruction.
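  // The bundle's latency is the sum of the latencies of the bundled
  // instructions, excluding the IT instruction itself; e.g. an IT block
  // bundling an ADD and a SUB reports the sum of the ADD and SUB latencies.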
4709   if (MI.isBundle()) {
4710     unsigned Latency = 0;
4711     MachineBasicBlock::const_instr_iterator I = MI.getIterator();
4712     MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
4713     while (++I != E && I->isInsideBundle()) {
4714       if (I->getOpcode() != ARM::t2IT)
4715         Latency += getInstrLatency(ItinData, *I, PredCost);
4716     }
4717     return Latency;
4718   }
4719 
4720   const MCInstrDesc &MCID = MI.getDesc();
4721   if (PredCost && (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4722                                      !Subtarget.cheapPredicableCPSRDef()))) {
4723     // When predicated, CPSR is an additional source operand for CPSR updating
4724     // instructions, which apparently increases their latencies.
4725     *PredCost = 1;
4726   }
4727   // Be sure to call getStageLatency for an empty itinerary in case it has a
4728   // valid MinLatency property.
4729   if (!ItinData)
4730     return MI.mayLoad() ? 3 : 1;
4731 
4732   unsigned Class = MCID.getSchedClass();
4733 
4734   // For instructions with variable uops, use uops as latency.
4735   if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
4736     return getNumMicroOps(ItinData, MI);
4737 
4738   // For the common case, fall back on the itinerary's latency.
4739   unsigned Latency = ItinData->getStageLatency(Class);
4740 
4741   // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4742   unsigned DefAlign =
4743       MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlign().value() : 0;
4744   int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
4745   if (Adj >= 0 || (int)Latency > -Adj) {
4746     return Latency + Adj;
4747   }
4748   return Latency;
4749 }
4750 
4751 int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4752                                       SDNode *Node) const {
4753   if (!Node->isMachineOpcode())
4754     return 1;
4755 
4756   if (!ItinData || ItinData->isEmpty())
4757     return 1;
4758 
4759   unsigned Opcode = Node->getMachineOpcode();
4760   switch (Opcode) {
4761   default:
4762     return ItinData->getStageLatency(get(Opcode).getSchedClass());
4763   case ARM::VLDMQIA:
4764   case ARM::VSTMQIA:
4765     return 2;
4766   }
4767 }
4768 
4769 bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
4770                                              const MachineRegisterInfo *MRI,
4771                                              const MachineInstr &DefMI,
4772                                              unsigned DefIdx,
4773                                              const MachineInstr &UseMI,
4774                                              unsigned UseIdx) const {
4775   unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4776   unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask;
4777   if (Subtarget.nonpipelinedVFP() &&
4778       (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
4779     return true;
4780 
4781   // Hoist VFP / NEON instructions with 4 or higher latency.
4782   unsigned Latency =
4783       SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx);
4784   if (Latency <= 3)
4785     return false;
4786   return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
4787          UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
4788 }
4789 
4790 bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
4791                                         const MachineInstr &DefMI,
4792                                         unsigned DefIdx) const {
4793   const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
4794   if (!ItinData || ItinData->isEmpty())
4795     return false;
4796 
4797   unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4798   if (DDomain == ARMII::DomainGeneral) {
4799     unsigned DefClass = DefMI.getDesc().getSchedClass();
4800     int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
4801     return (DefCycle != -1 && DefCycle <= 2);
4802   }
4803   return false;
4804 }
4805 
4806 bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI,
4807                                          StringRef &ErrInfo) const {
4808   if (convertAddSubFlagsOpcode(MI.getOpcode())) {
4809     ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
4810     return false;
4811   }
4812   if (MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4813     // Make sure we don't generate a lo-lo mov that isn't supported.
4814     if (!ARM::hGPRRegClass.contains(MI.getOperand(0).getReg()) &&
4815         !ARM::hGPRRegClass.contains(MI.getOperand(1).getReg())) {
4816       ErrInfo = "Non-flag-setting Thumb1 mov is v6-only";
4817       return false;
4818     }
4819   }
4820   if (MI.getOpcode() == ARM::tPUSH ||
4821       MI.getOpcode() == ARM::tPOP ||
4822       MI.getOpcode() == ARM::tPOP_RET) {
4823     for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2)) {
4824       if (MO.isImplicit() || !MO.isReg())
4825         continue;
4826       Register Reg = MO.getReg();
4827       if (Reg < ARM::R0 || Reg > ARM::R7) {
4828         if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4829             !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4830           ErrInfo = "Unsupported register in Thumb1 push/pop";
4831           return false;
4832         }
4833       }
4834     }
4835   }
4836   if (MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4837     assert(MI.getOperand(4).isImm() && MI.getOperand(5).isImm());
4838     if ((MI.getOperand(4).getImm() != 2 && MI.getOperand(4).getImm() != 3) ||
4839         MI.getOperand(4).getImm() != MI.getOperand(5).getImm() + 2) {
4840       ErrInfo = "Incorrect array index for MVE_VMOV_q_rr";
4841       return false;
4842     }
4843   }
4844 
4845   // Check the addressing mode by taking the first Imm operand and checking that
4846   // it is legal for that addressing mode.
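  // (Illustrative, assuming the usual Thumb-2 encodings: an AddrModeT2_i8
  // access whose immediate lies outside the encodable [-255, 255] range would
  // be flagged as invalid here.)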
4847   ARMII::AddrMode AddrMode =
4848       (ARMII::AddrMode)(MI.getDesc().TSFlags & ARMII::AddrModeMask);
4849   switch (AddrMode) {
4850   default:
4851     break;
4852   case ARMII::AddrModeT2_i7:
4853   case ARMII::AddrModeT2_i7s2:
4854   case ARMII::AddrModeT2_i7s4:
4855   case ARMII::AddrModeT2_i8:
4856   case ARMII::AddrModeT2_i8pos:
4857   case ARMII::AddrModeT2_i8neg:
4858   case ARMII::AddrModeT2_i8s4:
4859   case ARMII::AddrModeT2_i12: {
4860     uint32_t Imm = 0;
4861     for (auto Op : MI.operands()) {
4862       if (Op.isImm()) {
4863         Imm = Op.getImm();
4864         break;
4865       }
4866     }
4867     if (!isLegalAddressImm(MI.getOpcode(), Imm, this)) {
4868       ErrInfo = "Incorrect AddrMode Imm for instruction";
4869       return false;
4870     }
4871     break;
4872   }
4873   }
4874   return true;
4875 }
4876 
4877 void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
4878                                                 unsigned LoadImmOpc,
4879                                                 unsigned LoadOpc) const {
4880   assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4881          "ROPI/RWPI not currently supported with stack guard");
4882 
4883   MachineBasicBlock &MBB = *MI->getParent();
4884   DebugLoc DL = MI->getDebugLoc();
4885   Register Reg = MI->getOperand(0).getReg();
4886   MachineInstrBuilder MIB;
4887   unsigned int Offset = 0;
4888 
4889   if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4890     assert(Subtarget.isReadTPHard() &&
4891            "TLS stack protector requires hardware TLS register");
4892 
4893     BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
4894         .addImm(15)
4895         .addImm(0)
4896         .addImm(13)
4897         .addImm(0)
4898         .addImm(3)
4899         .add(predOps(ARMCC::AL));
4900 
4901     Module &M = *MBB.getParent()->getFunction().getParent();
4902     Offset = M.getStackProtectorGuardOffset();
4903     if (Offset & ~0xfffU) {
4904       // The offset won't fit in the LDR's 12-bit immediate field, so emit an
4905       // extra ADD to cover the delta. This gives us a guaranteed 8 additional
4906       // bits, resulting in a range of 0 to +1 MiB for the guard offset.
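      // For example, an offset of 0x1234 is split into an ADD of 0x1000
      // followed by an LDR with a #0x234 immediate.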
4907       unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4908       BuildMI(MBB, MI, DL, get(AddOpc), Reg)
4909           .addReg(Reg, RegState::Kill)
4910           .addImm(Offset & ~0xfffU)
4911           .add(predOps(ARMCC::AL))
4912           .addReg(0);
4913       Offset &= 0xfffU;
4914     }
4915   } else {
4916     const GlobalValue *GV =
4917         cast<GlobalValue>((*MI->memoperands_begin())->getValue());
4918     bool IsIndirect = Subtarget.isGVIndirectSymbol(GV);
4919 
4920     unsigned TargetFlags = ARMII::MO_NO_FLAG;
4921     if (Subtarget.isTargetMachO()) {
4922       TargetFlags |= ARMII::MO_NONLAZY;
4923     } else if (Subtarget.isTargetCOFF()) {
4924       if (GV->hasDLLImportStorageClass())
4925         TargetFlags |= ARMII::MO_DLLIMPORT;
4926       else if (IsIndirect)
4927         TargetFlags |= ARMII::MO_COFFSTUB;
4928     } else if (Subtarget.isGVInGOT(GV)) {
4929       TargetFlags |= ARMII::MO_GOT;
4930     }
4931 
4932     BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
4933         .addGlobalAddress(GV, 0, TargetFlags);
4934 
4935     if (IsIndirect) {
4936       MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
4937       MIB.addReg(Reg, RegState::Kill).addImm(0);
4938       auto Flags = MachineMemOperand::MOLoad |
4939                    MachineMemOperand::MODereferenceable |
4940                    MachineMemOperand::MOInvariant;
4941       MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
4942           MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, Align(4));
4943       MIB.addMemOperand(MMO).add(predOps(ARMCC::AL));
4944     }
4945   }
4946 
4947   MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
4948   MIB.addReg(Reg, RegState::Kill)
4949       .addImm(Offset)
4950       .cloneMemRefs(*MI)
4951       .add(predOps(ARMCC::AL));
4952 }
4953 
4954 bool
4955 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
4956                                      unsigned &AddSubOpc,
4957                                      bool &NegAcc, bool &HasLane) const {
4958   DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
4959   if (I == MLxEntryMap.end())
4960     return false;
4961 
4962   const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
4963   MulOpc = Entry.MulOpc;
4964   AddSubOpc = Entry.AddSubOpc;
4965   NegAcc = Entry.NegAcc;
4966   HasLane = Entry.HasLane;
4967   return true;
4968 }
4969 
4970 //===----------------------------------------------------------------------===//
4971 // Execution domains.
4972 //===----------------------------------------------------------------------===//
4973 //
4974 // Some instructions go down the NEON pipeline, some go down the VFP pipeline,
4975 // and some can go down both.  The vmov instructions go down the VFP pipeline,
4976 // but they can be changed to vorr equivalents that are executed by the NEON
4977 // pipeline.
4978 //
4979 // We use the following execution domain numbering:
4980 //
4981 enum ARMExeDomain {
4982   ExeGeneric = 0,
4983   ExeVFP = 1,
4984   ExeNEON = 2
4985 };
4986 
4987 //
4988 // Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
4989 //
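// For example, on a NEON-capable core an unpredicated VMOVD is reported below
// as currently ExeVFP but convertible to either domain, i.e. the pair
// (ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)).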
4990 std::pair<uint16_t, uint16_t>
4991 ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const {
4992   // If we don't have access to NEON instructions then we won't be able
4993   // to swizzle anything to the NEON domain. Check to make sure.
4994   if (Subtarget.hasNEON()) {
4995     // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
4996     // if they are not predicated.
4997     if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI))
4998       return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
4999 
5000     // CortexA9 is particularly picky about mixing the two and wants these
5001     // converted.
5002     if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) &&
5003         (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR ||
5004          MI.getOpcode() == ARM::VMOVS))
5005       return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
5006   }
5007   // No other instructions can be swizzled, so just determine their domain.
5008   unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask;
5009 
5010   if (Domain & ARMII::DomainNEON)
5011     return std::make_pair(ExeNEON, 0);
5012 
5013   // Certain instructions can go either way on Cortex-A8.
5014   // Treat them as NEON instructions.
5015   if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
5016     return std::make_pair(ExeNEON, 0);
5017 
5018   if (Domain & ARMII::DomainVFP)
5019     return std::make_pair(ExeVFP, 0);
5020 
5021   return std::make_pair(ExeGeneric, 0);
5022 }
5023 
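// Map an S-register to its containing D-register and lane: S-registers pair
// up into D-registers, so e.g. S1 corresponds to D0 lane 1 and S2 to D1
// lane 0.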
5024 static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
5025                                             unsigned SReg, unsigned &Lane) {
5026   unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
5027   Lane = 0;
5028 
5029   if (DReg != ARM::NoRegister)
5030     return DReg;
5031 
5032   Lane = 1;
5033   DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
5034 
5035   assert(DReg && "S-register with no D super-register?");
5036   return DReg;
5037 }
5038 
5039 /// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane,
5040 /// set ImplicitSReg to the register number that must be marked as an implicit
5041 /// use, or to zero if no register needs to be marked as an implicit use.
5042 ///
5043 /// If the function cannot determine whether an SPR should be marked as an
5044 /// implicit use, it returns false.
5045 ///
5046 /// This function handles cases where an instruction is being modified from taking
5047 /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict
5048 /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other
5049 /// lane of the DPR).
5050 ///
5051 /// If the other SPR is defined, an implicit-use of it should be added.
5052 /// Otherwise (including the case where the DPR itself is defined), it should not.
5053 ///
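/// For example, when a use of s1 is rewritten as a use of d0[1] and s0 (the
/// other lane, d0[0]) is live, s0 is returned in ImplicitSReg so the caller
/// can add it as an implicit use.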
5054 static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
5055                                        MachineInstr &MI, unsigned DReg,
5056                                        unsigned Lane, unsigned &ImplicitSReg) {
5057   // If the DPR is defined or used already, the other SPR lane will be chained
5058   // correctly, so there is nothing to be done.
5059   if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) {
5060     ImplicitSReg = 0;
5061     return true;
5062   }
5063 
5064   // Otherwise we need to go searching to see if the SPR is set explicitly.
5065   ImplicitSReg = TRI->getSubReg(DReg,
5066                                 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
5067   MachineBasicBlock::LivenessQueryResult LQR =
5068       MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
5069 
5070   if (LQR == MachineBasicBlock::LQR_Live)
5071     return true;
5072   else if (LQR == MachineBasicBlock::LQR_Unknown)
5073     return false;
5074 
5075   // If the register is known not to be live, there is no need to add an
5076   // implicit-use.
5077   ImplicitSReg = 0;
5078   return true;
5079 }
5080 
5081 void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
5082                                           unsigned Domain) const {
5083   unsigned DstReg, SrcReg, DReg;
5084   unsigned Lane;
5085   MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
5086   const TargetRegisterInfo *TRI = &getRegisterInfo();
5087   switch (MI.getOpcode()) {
5088   default:
5089     llvm_unreachable("cannot handle opcode!");
5090     break;
5091   case ARM::VMOVD:
5092     if (Domain != ExeNEON)
5093       break;
5094 
5095     // Zap the predicate operands.
5096     assert(!isPredicated(MI) && "Cannot predicate a VORRd");
5097 
5098     // Make sure we've got NEON instructions.
5099     assert(Subtarget.hasNEON() && "VORRd requires NEON");
5100 
5101     // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
5102     DstReg = MI.getOperand(0).getReg();
5103     SrcReg = MI.getOperand(1).getReg();
5104 
5105     for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5106       MI.RemoveOperand(i - 1);
5107 
5108     // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
5109     MI.setDesc(get(ARM::VORRd));
5110     MIB.addReg(DstReg, RegState::Define)
5111         .addReg(SrcReg)
5112         .addReg(SrcReg)
5113         .add(predOps(ARMCC::AL));
5114     break;
5115   case ARM::VMOVRS:
5116     if (Domain != ExeNEON)
5117       break;
5118     assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
5119 
5120     // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
5121     DstReg = MI.getOperand(0).getReg();
5122     SrcReg = MI.getOperand(1).getReg();
5123 
5124     for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5125       MI.RemoveOperand(i - 1);
5126 
5127     DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
5128 
5129     // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
5130     // Note that DSrc has been widened and the other lane may be undef, which
5131     // contaminates the entire register.
5132     MI.setDesc(get(ARM::VGETLNi32));
5133     MIB.addReg(DstReg, RegState::Define)
5134         .addReg(DReg, RegState::Undef)
5135         .addImm(Lane)
5136         .add(predOps(ARMCC::AL));
5137 
5138     // The old source should be an implicit use; otherwise we might think it
5139     // was dead before here.
5140     MIB.addReg(SrcReg, RegState::Implicit);
5141     break;
5142   case ARM::VMOVSR: {
5143     if (Domain != ExeNEON)
5144       break;
5145     assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
5146 
5147     // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
5148     DstReg = MI.getOperand(0).getReg();
5149     SrcReg = MI.getOperand(1).getReg();
5150 
5151     DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
5152 
5153     unsigned ImplicitSReg;
5154     if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
5155       break;
5156 
5157     for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5158       MI.RemoveOperand(i - 1);
5159 
5160     // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
5161     // Again DDst may be undefined at the beginning of this instruction.
5162     MI.setDesc(get(ARM::VSETLNi32));
5163     MIB.addReg(DReg, RegState::Define)
5164         .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI)))
5165         .addReg(SrcReg)
5166         .addImm(Lane)
5167         .add(predOps(ARMCC::AL));
5168 
5169     // The narrower destination must be marked as set to keep previous chains
5170     // in place.
5171     MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5172     if (ImplicitSReg != 0)
5173       MIB.addReg(ImplicitSReg, RegState::Implicit);
5174     break;
5175     }
5176     case ARM::VMOVS: {
5177       if (Domain != ExeNEON)
5178         break;
5179 
5180       // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
5181       DstReg = MI.getOperand(0).getReg();
5182       SrcReg = MI.getOperand(1).getReg();
5183 
5184       unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
5185       DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
5186       DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
5187 
5188       unsigned ImplicitSReg;
5189       if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
5190         break;
5191 
5192       for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5193         MI.RemoveOperand(i - 1);
5194 
5195       if (DSrc == DDst) {
5196         // Destination can be:
5197         //     %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
5198         MI.setDesc(get(ARM::VDUPLN32d));
5199         MIB.addReg(DDst, RegState::Define)
5200             .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
5201             .addImm(SrcLane)
5202             .add(predOps(ARMCC::AL));
5203 
5204         // Neither the source nor the destination is naturally represented any
5205         // more, so add them in manually.
5206         MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
5207         MIB.addReg(SrcReg, RegState::Implicit);
5208         if (ImplicitSReg != 0)
5209           MIB.addReg(ImplicitSReg, RegState::Implicit);
5210         break;
5211       }
5212 
5213       // In general there's no single instruction that can perform an S <-> S
5214       // move in NEON space, but a pair of VEXT instructions *can* do the
5215       // job. It turns out that the VEXTs needed will only use DSrc once, with
5216       // the position based purely on the combination of lane-0 and lane-1
5217       // involved. For example
5218       //     vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
5219       //     vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
5220       //     vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
5221       //     vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
5222       //
5223       // Pattern of the MachineInstrs is:
5224       //     %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
5225       MachineInstrBuilder NewMIB;
5226       NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
5227                        DDst);
5228 
5229       // On the first instruction, both DSrc and DDst may be undef if present.
5230       // Specifically when the original instruction didn't have them as an
5231       // <imp-use>.
5232       unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5233       bool CurUndef = !MI.readsRegister(CurReg, TRI);
5234       NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
5235 
5236       CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5237       CurUndef = !MI.readsRegister(CurReg, TRI);
5238       NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
5239             .addImm(1)
5240             .add(predOps(ARMCC::AL));
5241 
5242       if (SrcLane == DstLane)
5243         NewMIB.addReg(SrcReg, RegState::Implicit);
5244 
5245       MI.setDesc(get(ARM::VEXTd32));
5246       MIB.addReg(DDst, RegState::Define);
5247 
5248       // On the second instruction, DDst has definitely been defined above, so
5249       // it is not undef. DSrc, if present, can be undef as above.
5250       CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5251       CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5252       MIB.addReg(CurReg, getUndefRegState(CurUndef));
5253 
5254       CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5255       CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5256       MIB.addReg(CurReg, getUndefRegState(CurUndef))
5257          .addImm(1)
5258          .add(predOps(ARMCC::AL));
5259 
5260       if (SrcLane != DstLane)
5261         MIB.addReg(SrcReg, RegState::Implicit);
5262 
5263       // As before, the original destination is no longer represented; add it
5264       // implicitly.
5265       MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5266       if (ImplicitSReg != 0)
5267         MIB.addReg(ImplicitSReg, RegState::Implicit);
5268       break;
5269     }
5270   }
5271 }
5272 
5273 //===----------------------------------------------------------------------===//
5274 // Partial register updates
5275 //===----------------------------------------------------------------------===//
5276 //
5277 // Swift renames NEON registers with 64-bit granularity.  That means any
5278 // instruction writing an S-reg implicitly reads the containing D-reg.  The
5279 // problem is mostly avoided by translating f32 operations to v2f32 operations
5280 // on D-registers, but f32 loads are still a problem.
5281 //
5282 // These instructions can load an f32 into a NEON register:
5283 //
5284 // VLDRS - Only writes S, partial D update.
5285 // VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
5286 // VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
5287 //
5288 // FCONSTD can be used as a dependency-breaking instruction.
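// For example, on Swift a VLDRS that writes s0 without reading d0 picks up a
// false dependency on the previous writer of d0. Returning a non-zero
// clearance below asks for that dependency to be broken (see
// breakPartialRegDependency) whenever a def of d0 is within the last N
// instructions.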
5289 unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
5290     const MachineInstr &MI, unsigned OpNum,
5291     const TargetRegisterInfo *TRI) const {
5292   auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5293   if (!PartialUpdateClearance)
5294     return 0;
5295 
5296   assert(TRI && "Need TRI instance");
5297 
5298   const MachineOperand &MO = MI.getOperand(OpNum);
5299   if (MO.readsReg())
5300     return 0;
5301   Register Reg = MO.getReg();
5302   int UseOp = -1;
5303 
5304   switch (MI.getOpcode()) {
5305   // Normal instructions writing only an S-register.
5306   case ARM::VLDRS:
5307   case ARM::FCONSTS:
5308   case ARM::VMOVSR:
5309   case ARM::VMOVv8i8:
5310   case ARM::VMOVv4i16:
5311   case ARM::VMOVv2i32:
5312   case ARM::VMOVv2f32:
5313   case ARM::VMOVv1i64:
5314     UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
5315     break;
5316 
5317     // Explicitly reads the dependency.
5318   case ARM::VLD1LNd32:
5319     UseOp = 3;
5320     break;
5321   default:
5322     return 0;
5323   }
5324 
5325   // If this instruction actually reads a value from Reg, there is no unwanted
5326   // dependency.
5327   if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
5328     return 0;
5329 
5330   // We must be able to clobber the whole D-reg.
5331   if (Register::isVirtualRegister(Reg)) {
5332     // Virtual register must be a def undef foo:ssub_0 operand.
5333     if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
5334       return 0;
5335   } else if (ARM::SPRRegClass.contains(Reg)) {
5336     // Physical register: MI must define the full D-reg.
5337     unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5338                                              &ARM::DPRRegClass);
5339     if (!DReg || !MI.definesRegister(DReg, TRI))
5340       return 0;
5341   }
5342 
5343   // MI has an unwanted D-register dependency.
5344   // Avoid defs in the previous N instructions.
5345   return PartialUpdateClearance;
5346 }
5347 
5348 // Break a partial register dependency after getPartialRegUpdateClearance
5349 // returned non-zero.
5350 void ARMBaseInstrInfo::breakPartialRegDependency(
5351     MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
5352   assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def");
5353   assert(TRI && "Need TRI instance");
5354 
5355   const MachineOperand &MO = MI.getOperand(OpNum);
5356   Register Reg = MO.getReg();
5357   assert(Register::isPhysicalRegister(Reg) &&
5358          "Can't break virtual register dependencies.");
5359   unsigned DReg = Reg;
5360 
5361   // If MI defines an S-reg, find the corresponding D super-register.
5362   if (ARM::SPRRegClass.contains(Reg)) {
5363     DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5364     assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken");
5365   }
5366 
5367   assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
5368   assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");
5369 
5370   // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
5371   // the full D-register by loading the same value to both lanes.  The
5372   // instruction is micro-coded with 2 uops, so don't do this until we can
5373   // properly schedule micro-coded instructions.  The dispatcher stalls cause
5374   // too big regressions.
5375 
5376   // Insert the dependency-breaking FCONSTD before MI.
5377   // 96 is the encoding of 0.5, but the actual value doesn't matter here.
5378   BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg)
5379       .addImm(96)
5380       .add(predOps(ARMCC::AL));
5381   MI.addRegisterKilled(DReg, TRI, true);
5382 }
5383 
5384 bool ARMBaseInstrInfo::hasNOP() const {
5385   return Subtarget.getFeatureBits()[ARM::HasV6KOps];
5386 }
5387 
5388 bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
5389   if (MI->getNumOperands() < 4)
5390     return true;
5391   unsigned ShOpVal = MI->getOperand(3).getImm();
5392   unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
5393   // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
5394   if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
5395       ((ShImm == 1 || ShImm == 2) &&
5396        ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
5397     return true;
5398 
5399   return false;
5400 }
5401 
5402 bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
5403     const MachineInstr &MI, unsigned DefIdx,
5404     SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
5405   assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5406   assert(MI.isRegSequenceLike() && "Invalid kind of instruction");
5407 
5408   switch (MI.getOpcode()) {
5409   case ARM::VMOVDRR:
5410     // dX = VMOVDRR rY, rZ
5411     // is the same as:
5412     // dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1
5413     // Populate the InputRegs accordingly.
5414     // rY
5415     const MachineOperand *MOReg = &MI.getOperand(1);
5416     if (!MOReg->isUndef())
5417       InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
5418                                               MOReg->getSubReg(), ARM::ssub_0));
5419     // rZ
5420     MOReg = &MI.getOperand(2);
5421     if (!MOReg->isUndef())
5422       InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
5423                                               MOReg->getSubReg(), ARM::ssub_1));
5424     return true;
5425   }
5426   llvm_unreachable("Target dependent opcode missing");
5427 }
5428 
5429 bool ARMBaseInstrInfo::getExtractSubregLikeInputs(
5430     const MachineInstr &MI, unsigned DefIdx,
5431     RegSubRegPairAndIdx &InputReg) const {
5432   assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5433   assert(MI.isExtractSubregLike() && "Invalid kind of instruction");
5434 
5435   switch (MI.getOpcode()) {
5436   case ARM::VMOVRRD:
5437     // rX, rY = VMOVRRD dZ
5438     // is the same as:
5439     // rX = EXTRACT_SUBREG dZ, ssub_0
5440     // rY = EXTRACT_SUBREG dZ, ssub_1
5441     const MachineOperand &MOReg = MI.getOperand(2);
5442     if (MOReg.isUndef())
5443       return false;
5444     InputReg.Reg = MOReg.getReg();
5445     InputReg.SubReg = MOReg.getSubReg();
5446     InputReg.SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5447     return true;
5448   }
5449   llvm_unreachable("Target dependent opcode missing");
5450 }
5451 
5452 bool ARMBaseInstrInfo::getInsertSubregLikeInputs(
5453     const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg,
5454     RegSubRegPairAndIdx &InsertedReg) const {
5455   assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5456   assert(MI.isInsertSubregLike() && "Invalid kind of instruction");
5457 
5458   switch (MI.getOpcode()) {
5459   case ARM::VSETLNi32:
5460   case ARM::MVE_VMOV_to_lane_32:
5461     // dX = VSETLNi32 dY, rZ, imm
5462     // qX = MVE_VMOV_to_lane_32 qY, rZ, imm
5463     const MachineOperand &MOBaseReg = MI.getOperand(1);
5464     const MachineOperand &MOInsertedReg = MI.getOperand(2);
5465     if (MOInsertedReg.isUndef())
5466       return false;
5467     const MachineOperand &MOIndex = MI.getOperand(3);
5468     BaseReg.Reg = MOBaseReg.getReg();
5469     BaseReg.SubReg = MOBaseReg.getSubReg();
5470 
5471     InsertedReg.Reg = MOInsertedReg.getReg();
5472     InsertedReg.SubReg = MOInsertedReg.getSubReg();
5473     InsertedReg.SubIdx = ARM::ssub_0 + MOIndex.getImm();
5474     return true;
5475   }
5476   llvm_unreachable("Target dependent opcode missing");
5477 }
5478 
5479 std::pair<unsigned, unsigned>
5480 ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
5481   const unsigned Mask = ARMII::MO_OPTION_MASK;
5482   return std::make_pair(TF & Mask, TF & ~Mask);
5483 }
5484 
5485 ArrayRef<std::pair<unsigned, const char *>>
5486 ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
5487   using namespace ARMII;
5488 
5489   static const std::pair<unsigned, const char *> TargetFlags[] = {
5490       {MO_LO16, "arm-lo16"}, {MO_HI16, "arm-hi16"}};
5491   return makeArrayRef(TargetFlags);
5492 }
5493 
5494 ArrayRef<std::pair<unsigned, const char *>>
5495 ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
5496   using namespace ARMII;
5497 
5498   static const std::pair<unsigned, const char *> TargetFlags[] = {
5499       {MO_COFFSTUB, "arm-coffstub"},
5500       {MO_GOT, "arm-got"},
5501       {MO_SBREL, "arm-sbrel"},
5502       {MO_DLLIMPORT, "arm-dllimport"},
5503       {MO_SECREL, "arm-secrel"},
5504       {MO_NONLAZY, "arm-nonlazy"}};
5505   return makeArrayRef(TargetFlags);
5506 }
5507 
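// Describe an ADDri/SUBri of the queried register as a base register plus a
// signed offset; e.g. querying "SUBri r0, r1, #8" for r0 yields {r1, -8}.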
5508 Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI,
5509                                                       Register Reg) const {
5510   int Sign = 1;
5511   unsigned Opcode = MI.getOpcode();
5512   int64_t Offset = 0;
5513 
5514   // TODO: Handle cases where Reg is a super- or sub-register of the
5515   // destination register.
5516   const MachineOperand &Op0 = MI.getOperand(0);
5517   if (!Op0.isReg() || Reg != Op0.getReg())
5518     return None;
5519 
5520   // We describe SUBri or ADDri instructions.
5521   if (Opcode == ARM::SUBri)
5522     Sign = -1;
5523   else if (Opcode != ARM::ADDri)
5524     return None;
5525 
5526   // TODO: Third operand can be global address (usually some string). Since
5527   //       strings can be relocated, we cannot calculate their offsets for
5528   //       now.
5529   if (!MI.getOperand(1).isReg() || !MI.getOperand(2).isImm())
5530     return None;
5531 
5532   Offset = MI.getOperand(2).getImm() * Sign;
5533   return RegImmPair{MI.getOperand(1).getReg(), Offset};
5534 }
5535 
5536 bool llvm::registerDefinedBetween(unsigned Reg,
5537                                   MachineBasicBlock::iterator From,
5538                                   MachineBasicBlock::iterator To,
5539                                   const TargetRegisterInfo *TRI) {
5540   for (auto I = From; I != To; ++I)
5541     if (I->modifiesRegister(Reg, TRI))
5542       return true;
5543   return false;
5544 }
5545 
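// Find a "CMP rN, #0" (rN a low register) feeding the given branch so that
// the pair can later be folded into a CBZ/CBNZ, e.g. "cmp r3, #0; beq .L"
// becoming "cbz r3, .L". Returns nullptr if no such CMP exists or if the
// register is redefined between the CMP and the branch.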
5546 MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
5547                                          const TargetRegisterInfo *TRI) {
5548   // Search backwards to the instruction that defines CPSR. This may or may not
5549   // be a CMP; we check that after this loop. If we find another instruction
5550   // that reads CPSR, we return nullptr.
5551   MachineBasicBlock::iterator CmpMI = Br;
5552   while (CmpMI != Br->getParent()->begin()) {
5553     --CmpMI;
5554     if (CmpMI->modifiesRegister(ARM::CPSR, TRI))
5555       break;
5556     if (CmpMI->readsRegister(ARM::CPSR, TRI))
5557       break;
5558   }
5559 
5560   // Check that this inst is a CMP r[0-7], #0 and that the register
5561   // is not redefined between the cmp and the br.
5562   if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5563     return nullptr;
5564   Register Reg = CmpMI->getOperand(0).getReg();
5565   Register PredReg;
5566   ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg);
5567   if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5568     return nullptr;
5569   if (!isARMLowRegister(Reg))
5570     return nullptr;
5571   if (registerDefinedBetween(Reg, CmpMI->getNextNode(), Br, TRI))
5572     return nullptr;
5573 
5574   return &*CmpMI;
5575 }
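// Rough cost of materializing Val in a register, measured in bytes when
// ForCodesize is set and in instructions otherwise. For example, on a Thumb-2
// target with MOVW/MOVT a constant such as 0x12345678 falls through to the
// MOVW + MOVT case: 8 bytes, or 2 instructions.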
5576 
5577 unsigned llvm::ConstantMaterializationCost(unsigned Val,
5578                                            const ARMSubtarget *Subtarget,
5579                                            bool ForCodesize) {
5580   if (Subtarget->isThumb()) {
5581     if (Val <= 255) // MOV
5582       return ForCodesize ? 2 : 1;
5583     if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||                    // MOV
5584                                     ARM_AM::getT2SOImmVal(Val) != -1 || // MOVW
5585                                     ARM_AM::getT2SOImmVal(~Val) != -1)) // MVN
5586       return ForCodesize ? 4 : 1;
5587     if (Val <= 510) // MOV + ADDi8
5588       return ForCodesize ? 4 : 2;
5589     if (~Val <= 255) // MOV + MVN
5590       return ForCodesize ? 4 : 2;
5591     if (ARM_AM::isThumbImmShiftedVal(Val)) // MOV + LSL
5592       return ForCodesize ? 4 : 2;
5593   } else {
5594     if (ARM_AM::getSOImmVal(Val) != -1) // MOV
5595       return ForCodesize ? 4 : 1;
5596     if (ARM_AM::getSOImmVal(~Val) != -1) // MVN
5597       return ForCodesize ? 4 : 1;
5598     if (Subtarget->hasV6T2Ops() && Val <= 0xffff) // MOVW
5599       return ForCodesize ? 4 : 1;
5600     if (ARM_AM::isSOImmTwoPartVal(Val)) // two instrs
5601       return ForCodesize ? 8 : 2;
5602     if (ARM_AM::isSOImmTwoPartValNeg(Val)) // two instrs
5603       return ForCodesize ? 8 : 2;
5604   }
5605   if (Subtarget->useMovt()) // MOVW + MOVT
5606     return ForCodesize ? 8 : 2;
5607   return ForCodesize ? 8 : 3; // Literal pool load
5608 }
5609 
5610 bool llvm::HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2,
5611                                                const ARMSubtarget *Subtarget,
5612                                                bool ForCodesize) {
5613   // Check with ForCodesize
5614   unsigned Cost1 = ConstantMaterializationCost(Val1, Subtarget, ForCodesize);
5615   unsigned Cost2 = ConstantMaterializationCost(Val2, Subtarget, ForCodesize);
5616   if (Cost1 < Cost2)
5617     return true;
5618   if (Cost1 > Cost2)
5619     return false;
5620 
5621   // If they are equal, try with !ForCodesize
5622   return ConstantMaterializationCost(Val1, Subtarget, !ForCodesize) <
5623          ConstantMaterializationCost(Val2, Subtarget, !ForCodesize);
5624 }
5625 
5626 /// Constants defining how certain sequences should be outlined.
5627 /// This encompasses how an outlined function should be called, and what kind of
5628 /// frame should be emitted for that outlined function.
5629 ///
5630 /// \p MachineOutlinerTailCall implies that the function is being created from
5631 /// a sequence of instructions ending in a return.
5632 ///
5633 /// That is,
5634 ///
5635 /// I1                                OUTLINED_FUNCTION:
5636 /// I2    --> B OUTLINED_FUNCTION     I1
5637 /// BX LR                             I2
5638 ///                                   BX LR
5639 ///
5640 /// +-------------------------+--------+-----+
5641 /// |                         | Thumb2 | ARM |
5642 /// +-------------------------+--------+-----+
5643 /// | Call overhead in Bytes  |      4 |   4 |
5644 /// | Frame overhead in Bytes |      0 |   0 |
5645 /// | Stack fixup required    |     No |  No |
5646 /// +-------------------------+--------+-----+
5647 ///
5648 /// \p MachineOutlinerThunk implies that the function is being created from
5649 /// a sequence of instructions ending in a call. The outlined function is
5650 /// called with a BL instruction, and the outlined function tail-calls the
5651 /// original call destination.
5652 ///
5653 /// That is,
5654 ///
5655 /// I1                                OUTLINED_FUNCTION:
5656 /// I2   --> BL OUTLINED_FUNCTION     I1
5657 /// BL f                              I2
5658 ///                                   B f
5659 ///
5660 /// +-------------------------+--------+-----+
5661 /// |                         | Thumb2 | ARM |
5662 /// +-------------------------+--------+-----+
5663 /// | Call overhead in Bytes  |      4 |   4 |
5664 /// | Frame overhead in Bytes |      0 |   0 |
5665 /// | Stack fixup required    |     No |  No |
5666 /// +-------------------------+--------+-----+
5667 ///
5668 /// \p MachineOutlinerNoLRSave implies that the function should be called using
5669 /// a BL instruction, but doesn't require LR to be saved and restored. This
5670 /// happens when LR is known to be dead.
5671 ///
5672 /// That is,
5673 ///
5674 /// I1                                OUTLINED_FUNCTION:
5675 /// I2 --> BL OUTLINED_FUNCTION       I1
5676 /// I3                                I2
5677 ///                                   I3
5678 ///                                   BX LR
5679 ///
5680 /// +-------------------------+--------+-----+
5681 /// |                         | Thumb2 | ARM |
5682 /// +-------------------------+--------+-----+
5683 /// | Call overhead in Bytes  |      4 |   4 |
5684 /// | Frame overhead in Bytes |      2 |   4 |
5685 /// | Stack fixup required    |     No |  No |
5686 /// +-------------------------+--------+-----+
5687 ///
5688 /// \p MachineOutlinerRegSave implies that the function should be called with a
5689 /// save and restore of LR to an available register. This allows us to avoid
5690 /// stack fixups. Note that this outlining variant is compatible with the
5691 /// NoLRSave case.
5692 ///
5693 /// That is,
5694 ///
5695 /// I1     Save LR                    OUTLINED_FUNCTION:
5696 /// I2 --> BL OUTLINED_FUNCTION       I1
5697 /// I3     Restore LR                 I2
5698 ///                                   I3
5699 ///                                   BX LR
5700 ///
5701 /// +-------------------------+--------+-----+
5702 /// |                         | Thumb2 | ARM |
5703 /// +-------------------------+--------+-----+
5704 /// | Call overhead in Bytes  |      8 |  12 |
5705 /// | Frame overhead in Bytes |      2 |   4 |
5706 /// | Stack fixup required    |     No |  No |
5707 /// +-------------------------+--------+-----+
5708 ///
5709 /// \p MachineOutlinerDefault implies that the function should be called with
5710 /// a save and restore of LR to the stack.
5711 ///
5712 /// That is,
5713 ///
5714 /// I1     Save LR                    OUTLINED_FUNCTION:
5715 /// I2 --> BL OUTLINED_FUNCTION       I1
5716 /// I3     Restore LR                 I2
5717 ///                                   I3
5718 ///                                   BX LR
5719 ///
5720 /// +-------------------------+--------+-----+
5721 /// |                         | Thumb2 | ARM |
5722 /// +-------------------------+--------+-----+
5723 /// | Call overhead in Bytes  |      8 |  12 |
5724 /// | Frame overhead in Bytes |      2 |   4 |
5725 /// | Stack fixup required    |    Yes | Yes |
5726 /// +-------------------------+--------+-----+
5727 
5728 enum MachineOutlinerClass {
5729   MachineOutlinerTailCall,
5730   MachineOutlinerThunk,
5731   MachineOutlinerNoLRSave,
5732   MachineOutlinerRegSave,
5733   MachineOutlinerDefault
5734 };
5735 
5736 enum MachineOutlinerMBBFlags {
5737   LRUnavailableSomewhere = 0x2,
5738   HasCalls = 0x4,
5739   UnsafeRegsDead = 0x8
5740 };
5741 
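// Byte costs for each outlining variant, split between the call site
// ("Call*") and the outlined frame ("Frame*"). The values mirror the
// Thumb2/ARM tables in the comment above and are adjusted later for BTI and
// PAC-RET.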
5742 struct OutlinerCosts {
5743   int CallTailCall;
5744   int FrameTailCall;
5745   int CallThunk;
5746   int FrameThunk;
5747   int CallNoLRSave;
5748   int FrameNoLRSave;
5749   int CallRegSave;
5750   int FrameRegSave;
5751   int CallDefault;
5752   int FrameDefault;
5753   int SaveRestoreLROnStack;
5754 
5755   OutlinerCosts(const ARMSubtarget &target)
5756       : CallTailCall(target.isThumb() ? 4 : 4),
5757         FrameTailCall(target.isThumb() ? 0 : 0),
5758         CallThunk(target.isThumb() ? 4 : 4),
5759         FrameThunk(target.isThumb() ? 0 : 0),
5760         CallNoLRSave(target.isThumb() ? 4 : 4),
5761         FrameNoLRSave(target.isThumb() ? 2 : 4),
5762         CallRegSave(target.isThumb() ? 8 : 12),
5763         FrameRegSave(target.isThumb() ? 2 : 4),
5764         CallDefault(target.isThumb() ? 8 : 12),
5765         FrameDefault(target.isThumb() ? 2 : 4),
5766         SaveRestoreLROnStack(target.isThumb() ? 8 : 8) {}
5767 };
5768 
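/// Find a register from rGPR (excluding LR and R12) that is not reserved, not
/// used in the candidate sequence, and not live across it, so that LR can be
/// saved to it around the outlined call. Returns 0 if no such register exists.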
5769 unsigned
5770 ARMBaseInstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
5771   assert(C.LRUWasSet && "LRU wasn't set?");
5772   MachineFunction *MF = C.getMF();
5773   const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo *>(
5774       MF->getSubtarget().getRegisterInfo());
5775 
5776   BitVector regsReserved = ARI->getReservedRegs(*MF);
5777   // Check if there is an available register across the sequence that we can
5778   // use.
5779   for (unsigned Reg : ARM::rGPRRegClass) {
5780     if (!(Reg < regsReserved.size() && regsReserved.test(Reg)) &&
5781         Reg != ARM::LR &&  // LR is not reserved, but don't use it.
5782         Reg != ARM::R12 && // R12 is not guaranteed to be preserved.
5783         C.LRU.available(Reg) && C.UsedInSequence.available(Reg))
5784       return Reg;
5785   }
5786 
5787   // No suitable register. Return 0.
5788   return 0u;
5789 }
5790 
5791 // Compute liveness of LR at the point after the interval [I, E), which
5792 // denotes a *backward* iteration through instructions. Used only for return
5793 // basic blocks, which do not end with a tail call.
5794 static bool isLRAvailable(const TargetRegisterInfo &TRI,
5795                           MachineBasicBlock::reverse_iterator I,
5796                           MachineBasicBlock::reverse_iterator E) {
5797   // At the end of the function LR is dead.
5798   bool Live = false;
5799   for (; I != E; ++I) {
5800     const MachineInstr &MI = *I;
5801 
5802     // Check defs of LR.
5803     if (MI.modifiesRegister(ARM::LR, &TRI))
5804       Live = false;
5805 
5806     // Check uses of LR.
5807     unsigned Opcode = MI.getOpcode();
5808     if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5809         Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5810         Opcode == ARM::tBXNS_RET) {
5811       // These instructions use LR, but it's not an (explicit or implicit)
5812       // operand.
5813       Live = true;
5814       continue;
5815     }
5816     if (MI.readsRegister(ARM::LR, &TRI))
5817       Live = true;
5818   }
5819   return !Live;
5820 }
5821 
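/// Compute cost and frame information for a set of repeated candidate
/// sequences: drop candidates where R12 or CPSR is live across the sequence
/// or whose BTI/PAC-RET settings disagree with the majority, then choose
/// between the tail-call, thunk, no-LR-save, register-save and default
/// (LR-on-stack) strategies based on LR liveness and SP usage.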
5822 outliner::OutlinedFunction ARMBaseInstrInfo::getOutliningCandidateInfo(
5823     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
5824   outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
5825   unsigned SequenceSize =
5826       std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
5827                       [this](unsigned Sum, const MachineInstr &MI) {
5828                         return Sum + getInstSizeInBytes(MI);
5829                       });
5830 
5831   // Properties about candidate MBBs that hold for all of them.
5832   unsigned FlagsSetInAll = 0xF;
5833 
5834   // Compute liveness information for each candidate, and set FlagsSetInAll.
5835   const TargetRegisterInfo &TRI = getRegisterInfo();
5836   std::for_each(
5837       RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
5838       [&FlagsSetInAll](outliner::Candidate &C) { FlagsSetInAll &= C.Flags; });
5839 
5840   // According to the ARM Procedure Call Standard, the following are
5841   // undefined on entry/exit from a function call:
5842   //
5843   // * Register R12 (IP),
5844   // * Condition codes (and thus the CPSR register)
5845   //
5846   // Since we control the instructions which are part of the outlined regions
5847   // we don't need to be fully compliant with the AAPCS, but we have to
5848   // guarantee that if a veneer is inserted at link time the code is still
5849   // correct.  Because of this, we can't outline any sequence of instructions
5850   // where one of these registers is live into/across it. Thus, we need to
5851   // delete those candidates.
5852   auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
5853     // If the unsafe registers in this block are all dead, then we don't need
5854     // to compute liveness here.
5855     if (C.Flags & UnsafeRegsDead)
5856       return false;
5857     C.initLRU(TRI);
5858     LiveRegUnits LRU = C.LRU;
5859     return (!LRU.available(ARM::R12) || !LRU.available(ARM::CPSR));
5860   };
5861 
5862   // Are there any candidates where those registers are live?
5863   if (!(FlagsSetInAll & UnsafeRegsDead)) {
5864     // Erase every candidate that violates the restrictions above. (It could be
5865     // true that we have viable candidates, so it's not worth bailing out in
5866     // the case that, say, 1 out of 20 candidates violates the restrictions.)
5867     llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5868 
5869     // If the sequence doesn't have enough candidates left, then we're done.
5870     if (RepeatedSequenceLocs.size() < 2)
5871       return outliner::OutlinedFunction();
5872   }
5873 
5874   // We expect the majority of the outlining candidates to agree on return
5875   // address signing and authentication (PAC-RET) and on branch target
5876   // enforcement (BTI). In other words, partitioning according to all four
5877   // possible combinations of PAC-RET and BTI is going to yield one big subset
5878   // and three small (likely empty) subsets. That allows us to cull incompatible
5879   // candidates separately for PAC-RET and BTI.
5880 
5881   // Partition the candidates into two sets: one with BTI enabled and one with
5882   // BTI disabled. Remove the candidates from the smaller set. If the two sets
5883   // are the same size, prefer the non-BTI ones for outlining, since they have
5884   // less overhead.
5885   auto NoBTI =
5886       llvm::partition(RepeatedSequenceLocs, [](const outliner::Candidate &C) {
5887         const ARMFunctionInfo &AFI = *C.getMF()->getInfo<ARMFunctionInfo>();
5888         return AFI.branchTargetEnforcement();
5889       });
5890   if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5891       std::distance(NoBTI, RepeatedSequenceLocs.end()))
5892     RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5893   else
5894     RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5895 
5896   if (RepeatedSequenceLocs.size() < 2)
5897     return outliner::OutlinedFunction();
5898 
5899   // Likewise, partition the candidates according to PAC-RET enablement.
5900   auto NoPAC =
5901       llvm::partition(RepeatedSequenceLocs, [](const outliner::Candidate &C) {
5902         const ARMFunctionInfo &AFI = *C.getMF()->getInfo<ARMFunctionInfo>();
5903         // If the function happens to not spill the LR, do not disqualify it
5904         // from the outlining.
5905         return AFI.shouldSignReturnAddress(true);
5906       });
5907   if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5908       std::distance(NoPAC, RepeatedSequenceLocs.end()))
5909     RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5910   else
5911     RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5912 
5913   if (RepeatedSequenceLocs.size() < 2)
5914     return outliner::OutlinedFunction();
5915 
5916   // At this point, we have only "safe" candidates to outline. Figure out
5917   // frame + call instruction information.
5918 
5919   unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();
5920 
5921   // Helper lambda which sets call information for every candidate.
5922   auto SetCandidateCallInfo =
5923       [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
5924         for (outliner::Candidate &C : RepeatedSequenceLocs)
5925           C.setCallInfo(CallID, NumBytesForCall);
5926       };
5927 
5928   OutlinerCosts Costs(Subtarget);
5929 
5930   const auto &SomeMFI =
5931       *RepeatedSequenceLocs.front().getMF()->getInfo<ARMFunctionInfo>();
5932   // Adjust costs to account for the BTI instructions.
5933   if (SomeMFI.branchTargetEnforcement()) {
5934     Costs.FrameDefault += 4;
5935     Costs.FrameNoLRSave += 4;
5936     Costs.FrameRegSave += 4;
5937     Costs.FrameTailCall += 4;
5938     Costs.FrameThunk += 4;
5939   }
5940 
5941   // Adjust costs to account for sign and authentication instructions.
5942   if (SomeMFI.shouldSignReturnAddress(true)) {
5943     Costs.CallDefault += 8;          // +PAC instr, +AUT instr
5944     Costs.SaveRestoreLROnStack += 8; // +PAC instr, +AUT instr
5945   }
5946 
5947   unsigned FrameID = MachineOutlinerDefault;
5948   unsigned NumBytesToCreateFrame = Costs.FrameDefault;
5949 
5950   // If the last instruction in any candidate is a terminator, then we should
5951   // tail call all of the candidates.
5952   if (RepeatedSequenceLocs[0].back()->isTerminator()) {
5953     FrameID = MachineOutlinerTailCall;
5954     NumBytesToCreateFrame = Costs.FrameTailCall;
5955     SetCandidateCallInfo(MachineOutlinerTailCall, Costs.CallTailCall);
5956   } else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5957              LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
5958              LastInstrOpcode == ARM::tBLXr ||
5959              LastInstrOpcode == ARM::tBLXr_noip ||
5960              LastInstrOpcode == ARM::tBLXi) {
5961     FrameID = MachineOutlinerThunk;
5962     NumBytesToCreateFrame = Costs.FrameThunk;
5963     SetCandidateCallInfo(MachineOutlinerThunk, Costs.CallThunk);
5964   } else {
5965     // We need to decide how to emit calls + frames. We can always emit the same
5966     // frame if we don't need to save to the stack. If we have to save to the
5967     // stack, then we need a different frame.
5968     unsigned NumBytesNoStackCalls = 0;
5969     std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5970 
5971     for (outliner::Candidate &C : RepeatedSequenceLocs) {
5972       C.initLRU(TRI);
5973       // LR liveness is overestimated in return blocks, unless they end with a
5974       // tail call.
5975       const auto Last = C.getMBB()->rbegin();
5976       const bool LRIsAvailable =
5977           C.getMBB()->isReturnBlock() && !Last->isCall()
5978               ? isLRAvailable(TRI, Last,
5979                               (MachineBasicBlock::reverse_iterator)C.front())
5980               : C.LRU.available(ARM::LR);
5981       if (LRIsAvailable) {
5982         FrameID = MachineOutlinerNoLRSave;
5983         NumBytesNoStackCalls += Costs.CallNoLRSave;
5984         C.setCallInfo(MachineOutlinerNoLRSave, Costs.CallNoLRSave);
5985         CandidatesWithoutStackFixups.push_back(C);
5986       }
5987 
5988       // Is an unused register available? If so, we won't modify the stack, so
5989       // we can outline with the same frame type as those that don't save LR.
5990       else if (findRegisterToSaveLRTo(C)) {
5991         FrameID = MachineOutlinerRegSave;
5992         NumBytesNoStackCalls += Costs.CallRegSave;
5993         C.setCallInfo(MachineOutlinerRegSave, Costs.CallRegSave);
5994         CandidatesWithoutStackFixups.push_back(C);
5995       }
5996 
5997       // Is SP used in the sequence at all? If not, we don't have to modify
5998       // the stack, so we are guaranteed to get the same frame.
5999       else if (C.UsedInSequence.available(ARM::SP)) {
6000         NumBytesNoStackCalls += Costs.CallDefault;
6001         C.setCallInfo(MachineOutlinerDefault, Costs.CallDefault);
6002         CandidatesWithoutStackFixups.push_back(C);
6003       }
6004 
6005       // If we outline this, we need to modify the stack. Pretend we don't
6006       // outline this by saving all of its bytes.
6007       else
6008         NumBytesNoStackCalls += SequenceSize;
6009     }
6010 
6011     // If there are no places where we have to save LR, then note that we don't
6012     // have to update the stack. Otherwise, give every candidate the default
6013     // call type.
6014     if (NumBytesNoStackCalls <=
6015         RepeatedSequenceLocs.size() * Costs.CallDefault) {
6016       RepeatedSequenceLocs = CandidatesWithoutStackFixups;
6017       FrameID = MachineOutlinerNoLRSave;
6018     } else
6019       SetCandidateCallInfo(MachineOutlinerDefault, Costs.CallDefault);
6020   }
6021 
6022   // Does every candidate's MBB contain a call?  If so, then we might have a
6023   // call in the range.
6024   if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
6025     // Check if the range contains a call.  These require a save + restore of
6026     // the link register.
6027     if (std::any_of(FirstCand.front(), FirstCand.back(),
6028                     [](const MachineInstr &MI) { return MI.isCall(); }))
6029       NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;
6030 
6031     // Handle the last instruction separately.  If it is a tail call, then the
6032     // last instruction is a call; we don't want to save + restore in this
6033     // case.  However, it could be possible that the last instruction is a
6034     // call without it being valid to tail call this sequence.  We should
6035     // consider this as well.
6036     else if (FrameID != MachineOutlinerThunk &&
6037              FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
6038       NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;
6039   }
6040 
6041   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
6042                                     NumBytesToCreateFrame, FrameID);
6043 }
6044 
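/// Check whether the SP-relative immediate offset of \p MI can absorb an
/// extra \p Fixup bytes (e.g. the slot used to spill LR) and still be encoded
/// in its addressing mode; if \p Updt is set, the immediate is rewritten in
/// place. Returns true if \p MI has no SP operand or if the adjusted offset
/// is encodable, false otherwise. As an illustrative sketch (assuming the
/// usual operand layout of a t2LDRi12): a load from [SP, #8] with Fixup == 8
/// becomes [SP, #16], which still fits the 12-bit immediate field.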
6045 bool ARMBaseInstrInfo::checkAndUpdateStackOffset(MachineInstr *MI,
6046                                                  int64_t Fixup,
6047                                                  bool Updt) const {
6048   int SPIdx = MI->findRegisterUseOperandIdx(ARM::SP);
6049   unsigned AddrMode = (MI->getDesc().TSFlags & ARMII::AddrModeMask);
6050   if (SPIdx < 0)
6051     // No SP operand
6052     return true;
6053   else if (SPIdx != 1 && (AddrMode != ARMII::AddrModeT2_i8s4 || SPIdx != 2))
6054     // If SP is not the base register we can't do much
6055     return false;
6056 
6057   // Stack might be involved, but the addressing mode doesn't handle any offset.
6058   // Note: AddrModeT1_[1|2|4] don't operate on SP.
6059   if (AddrMode == ARMII::AddrMode1 ||       // Arithmetic instructions
6060       AddrMode == ARMII::AddrMode4 ||       // Load/Store Multiple
6061       AddrMode == ARMII::AddrMode6 ||       // Neon Load/Store Multiple
6062       AddrMode == ARMII::AddrModeT2_so ||   // SP can't be used as base register
6063       AddrMode == ARMII::AddrModeT2_pc ||   // PCrel access
6064       AddrMode == ARMII::AddrMode2 ||       // Used by PRE and POST indexed LD/ST
6065       AddrMode == ARMII::AddrModeT2_i7 ||   // v8.1-M MVE
6066       AddrMode == ARMII::AddrModeT2_i7s2 || // v8.1-M MVE
6067       AddrMode == ARMII::AddrModeT2_i7s4 || // v8.1-M sys regs VLDR/VSTR
6068       AddrMode == ARMII::AddrModeNone ||
6069       AddrMode == ARMII::AddrModeT2_i8 ||   // Pre/Post inc instructions
6070       AddrMode == ARMII::AddrModeT2_i8neg)  // Always negative imm
6071     return false;
6072 
6073   unsigned NumOps = MI->getDesc().getNumOperands();
6074   unsigned ImmIdx = NumOps - 3;
6075 
6076   const MachineOperand &Offset = MI->getOperand(ImmIdx);
6077   assert(Offset.isImm() && "Is not an immediate");
6078   int64_t OffVal = Offset.getImm();
6079 
6080   if (OffVal < 0)
6081     // Don't override data if they are below SP.
6082     return false;
6083 
6084   unsigned NumBits = 0;
6085   unsigned Scale = 1;
6086 
6087   switch (AddrMode) {
6088   case ARMII::AddrMode3:
6089     if (ARM_AM::getAM3Op(OffVal) == ARM_AM::sub)
6090       return false;
6091     OffVal = ARM_AM::getAM3Offset(OffVal);
6092     NumBits = 8;
6093     break;
6094   case ARMII::AddrMode5:
6095     if (ARM_AM::getAM5Op(OffVal) == ARM_AM::sub)
6096       return false;
6097     OffVal = ARM_AM::getAM5Offset(OffVal);
6098     NumBits = 8;
6099     Scale = 4;
6100     break;
6101   case ARMII::AddrMode5FP16:
6102     if (ARM_AM::getAM5FP16Op(OffVal) == ARM_AM::sub)
6103       return false;
6104     OffVal = ARM_AM::getAM5FP16Offset(OffVal);
6105     NumBits = 8;
6106     Scale = 2;
6107     break;
6108   case ARMII::AddrModeT2_i8pos:
6109     NumBits = 8;
6110     break;
6111   case ARMII::AddrModeT2_i8s4:
6112     // FIXME: Values are already scaled in this addressing mode.
6113     assert((Fixup & 3) == 0 && "Can't encode this offset!");
6114     NumBits = 10;
6115     break;
6116   case ARMII::AddrModeT2_ldrex:
6117     NumBits = 8;
6118     Scale = 4;
6119     break;
6120   case ARMII::AddrModeT2_i12:
6121   case ARMII::AddrMode_i12:
6122     NumBits = 12;
6123     break;
6124   case ARMII::AddrModeT1_s: // SP-relative LD/ST
6125     NumBits = 8;
6126     Scale = 4;
6127     break;
6128   default:
6129     llvm_unreachable("Unsupported addressing mode!");
6130   }
6131   // Make sure the offset is encodable for instructions that scale the
6132   // immediate.
6133   assert(((OffVal * Scale + Fixup) & (Scale - 1)) == 0 &&
6134          "Can't encode this offset!");
6135   OffVal += Fixup / Scale;
6136 
6137   unsigned Mask = (1 << NumBits) - 1;
6138 
6139   if (OffVal <= Mask) {
6140     if (Updt)
6141       MI->getOperand(ImmIdx).setImm(OffVal);
6142     return true;
6143   }
6144 
6145   return false;
6146 }
6147 
6148 void ARMBaseInstrInfo::mergeOutliningCandidateAttributes(
6149     Function &F, std::vector<outliner::Candidate> &Candidates) const {
6150   outliner::Candidate &C = Candidates.front();
6151   // branch-target-enforcement is guaranteed to be consistent between all
6152   // candidates, so we only need to look at one.
6153   const Function &CFn = C.getMF()->getFunction();
6154   if (CFn.hasFnAttribute("branch-target-enforcement"))
6155     F.addFnAttr(CFn.getFnAttribute("branch-target-enforcement"));
6156 
6157   ARMGenInstrInfo::mergeOutliningCandidateAttributes(F, Candidates);
6158 }
6159 
6160 bool ARMBaseInstrInfo::isFunctionSafeToOutlineFrom(
6161     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
6162   const Function &F = MF.getFunction();
6163 
6164   // Can F be deduplicated by the linker? If it can, don't outline from it.
6165   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
6166     return false;
6167 
6168   // Don't outline from functions with section markings; the program could
6169   // expect that all the code is in the named section.
6170   // FIXME: Allow outlining from multiple functions with the same section
6171   // marking.
6172   if (F.hasSection())
6173     return false;
6174 
6175   // FIXME: Thumb1 outlining is not handled
6176   if (MF.getInfo<ARMFunctionInfo>()->isThumb1OnlyFunction())
6177     return false;
6178 
6179   // It's safe to outline from MF.
6180   return true;
6181 }
6182 
6183 bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
6184                                               unsigned &Flags) const {
6185   // Check if LR is available through all of the MBB. If it's not, then set
6186   // a flag.
6187   assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
6188          "Suitable Machine Function for outlining must track liveness");
6189 
6190   LiveRegUnits LRU(getRegisterInfo());
6191 
6192   std::for_each(MBB.rbegin(), MBB.rend(),
6193                 [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });
6194 
6195   // Check if each of the unsafe registers is available...
6196   bool R12AvailableInBlock = LRU.available(ARM::R12);
6197   bool CPSRAvailableInBlock = LRU.available(ARM::CPSR);
6198 
6199   // If all of these are dead (and not live out), we know we don't have to check
6200   // them later.
6201   if (R12AvailableInBlock && CPSRAvailableInBlock)
6202     Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
6203 
6204   // Now, add the live outs to the set.
6205   LRU.addLiveOuts(MBB);
6206 
6207   // If any of these registers is available in the MBB, but also a live out of
6208   // the block, then we know outlining is unsafe.
6209   if (R12AvailableInBlock && !LRU.available(ARM::R12))
6210     return false;
6211   if (CPSRAvailableInBlock && !LRU.available(ARM::CPSR))
6212     return false;
6213 
6214   // Check if there's a call inside this MachineBasicBlock.  If there is, then
6215   // set a flag.
6216   if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
6217     Flags |= MachineOutlinerMBBFlags::HasCalls;
6218 
6219   // LR liveness is overestimated in return blocks.
6220 
6221   bool LRIsAvailable =
6222       MBB.isReturnBlock() && !MBB.back().isCall()
6223           ? isLRAvailable(getRegisterInfo(), MBB.rbegin(), MBB.rend())
6224           : LRU.available(ARM::LR);
6225   if (!LRIsAvailable)
6226     Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6227 
6228   return true;
6229 }
6230 
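/// Classify a single instruction for the outliner: Invisible for debug, KILL
/// and IMPLICIT_DEF instructions; Illegal for anything that references
/// constant pools, jump tables or frame indices, or that is position-, LR-,
/// PC-, SP- or IT-state-sensitive in a way we cannot fix up; Legal otherwise.
/// Calls whose callee's stack usage is unknown may only be outlined as tail
/// calls (LegalTerminator).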
6231 outliner::InstrType
6232 ARMBaseInstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
6233                                    unsigned Flags) const {
6234   MachineInstr &MI = *MIT;
6235   const TargetRegisterInfo *TRI = &getRegisterInfo();
6236 
6237   // Be conservative with inline ASM
6238   if (MI.isInlineAsm())
6239     return outliner::InstrType::Illegal;
6240 
6241   // Don't allow debug values to impact outlining type.
6242   if (MI.isDebugInstr() || MI.isIndirectDebugValue())
6243     return outliner::InstrType::Invisible;
6244 
6245   // At this point, KILL or IMPLICIT_DEF instructions don't really tell us much
6246   // so we can go ahead and skip over them.
6247   if (MI.isKill() || MI.isImplicitDef())
6248     return outliner::InstrType::Invisible;
6249 
6250   // PIC instructions contain labels; outlining them would break offset
6251   // computation.
6252   unsigned Opc = MI.getOpcode();
6253   if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
6254       Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
6255       Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
6256       Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
6257       Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
6258       Opc == ARM::t2MOV_ga_pcrel)
6259     return outliner::InstrType::Illegal;
6260 
6261   // Be conservative with ARMv8.1-M low-overhead loop and MVE pseudo-instructions.
6262   if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
6263       Opc == ARM::t2DoLoopStartTP || Opc == ARM::t2WhileLoopStart ||
6264       Opc == ARM::t2WhileLoopStartLR || Opc == ARM::t2WhileLoopStartTP ||
6265       Opc == ARM::t2LoopDec || Opc == ARM::t2LoopEnd ||
6266       Opc == ARM::t2LoopEndDec)
6267     return outliner::InstrType::Illegal;
6268 
6269   const MCInstrDesc &MCID = MI.getDesc();
6270   uint64_t MIFlags = MCID.TSFlags;
6271   if ((MIFlags & ARMII::DomainMask) == ARMII::DomainMVE)
6272     return outliner::InstrType::Illegal;
6273 
6274   // Is this a terminator for a basic block?
6275   if (MI.isTerminator()) {
6276     // Don't outline if the branch is not unconditional.
6277     if (isPredicated(MI))
6278       return outliner::InstrType::Illegal;
6279 
6280     // Is this the end of a function?
6281     if (MI.getParent()->succ_empty())
6282       return outliner::InstrType::Legal;
6283 
6284     // It's not, so don't outline it.
6285     return outliner::InstrType::Illegal;
6286   }
6287 
6288   // Make sure none of the operands are un-outlinable.
6289   for (const MachineOperand &MOP : MI.operands()) {
6290     if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
6291         MOP.isTargetIndex())
6292       return outliner::InstrType::Illegal;
6293   }
6294 
6295   // Don't outline if link register or program counter value are used.
6296   if (MI.readsRegister(ARM::LR, TRI) || MI.readsRegister(ARM::PC, TRI))
6297     return outliner::InstrType::Illegal;
6298 
6299   if (MI.isCall()) {
6300     // Get the function associated with the call.  Look at each operand and find
6301     // the one that represents the callee and get its name.
6302     const Function *Callee = nullptr;
6303     for (const MachineOperand &MOP : MI.operands()) {
6304       if (MOP.isGlobal()) {
6305         Callee = dyn_cast<Function>(MOP.getGlobal());
6306         break;
6307       }
6308     }
6309 
6310     // Don't outline calls to "mcount"-like functions; in particular, Linux
6311     // kernel function tracing relies on them.
6312     if (Callee &&
6313         (Callee->getName() == "\01__gnu_mcount_nc" ||
6314          Callee->getName() == "\01mcount" || Callee->getName() == "__mcount"))
6315       return outliner::InstrType::Illegal;
6316 
6317     // If we don't know anything about the callee, assume it depends on the
6318     // stack layout of the caller. In that case, it's only legal to outline
6319     // as a tail-call. Explicitly list the call instructions we know about so
6320     // we don't get unexpected results with call pseudo-instructions.
6321     auto UnknownCallOutlineType = outliner::InstrType::Illegal;
6322     if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
6323         Opc == ARM::BLX_noip || Opc == ARM::tBLXr || Opc == ARM::tBLXr_noip ||
6324         Opc == ARM::tBLXi)
6325       UnknownCallOutlineType = outliner::InstrType::LegalTerminator;
6326 
6327     if (!Callee)
6328       return UnknownCallOutlineType;
6329 
6330     // We have a function we have information about.  Check if it's something we
6331     // can safely outline.
6332     MachineFunction *MF = MI.getParent()->getParent();
6333     MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);
6334 
6335     // We don't know what's going on with the callee at all.  Don't touch it.
6336     if (!CalleeMF)
6337       return UnknownCallOutlineType;
6338 
6339     // Check if we know anything about the callee saves on the function. If we
6340     // don't, then don't touch it, since that implies that we haven't computed
6341     // anything about its stack frame yet.
6342     MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
6343     if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
6344         MFI.getNumObjects() > 0)
6345       return UnknownCallOutlineType;
6346 
6347     // At this point, we can say that CalleeMF ought to not pass anything on the
6348     // stack. Therefore, we can outline it.
6349     return outliner::InstrType::Legal;
6350   }
6351 
6352   // Since calls are handled, don't touch LR or PC
6353   if (MI.modifiesRegister(ARM::LR, TRI) || MI.modifiesRegister(ARM::PC, TRI))
6354     return outliner::InstrType::Illegal;
6355 
6356   // Does this use the stack?
6357   if (MI.modifiesRegister(ARM::SP, TRI) || MI.readsRegister(ARM::SP, TRI)) {
6358     // True if there is no chance that any outlined candidate from this range
6359     // could require stack fixups. That is, both
6360     // * LR is available in the range (No save/restore around call)
6361     // * The range doesn't include calls (No save/restore in outlined frame)
6362     // are true.
6363     // These conditions also ensure correctness of the return address
6364     // authentication - we insert sign and authentication instructions only if
6365     // we save/restore LR on stack, but then this condition ensures that the
6366     // outlined range does not modify the SP, therefore the SP value used for
6367     // signing is the same as the one used for authentication.
6368     // FIXME: This is very restrictive; the flags check the whole block,
6369     // not just the bit we will try to outline.
6370     bool MightNeedStackFixUp =
6371         (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6372                   MachineOutlinerMBBFlags::HasCalls));
6373 
6374     if (!MightNeedStackFixUp)
6375       return outliner::InstrType::Legal;
6376 
6377     // Any modification of SP will break our code to save/restore LR.
6378     // FIXME: We could handle some instructions which add a constant offset to
6379     // SP, with a bit more work.
6380     if (MI.modifiesRegister(ARM::SP, TRI))
6381       return outliner::InstrType::Illegal;
6382 
6383     // At this point, we have a stack instruction that we might need to fix
6384     // up.  We'll handle it if it's a load or store.
6385     if (checkAndUpdateStackOffset(&MI, Subtarget.getStackAlignment().value(),
6386                                   false))
6387       return outliner::InstrType::Legal;
6388 
6389     // We can't fix it up, so don't outline it.
6390     return outliner::InstrType::Illegal;
6391   }
6392 
6393   // Be conservative with IT blocks.
6394   if (MI.readsRegister(ARM::ITSTATE, TRI) ||
6395       MI.modifiesRegister(ARM::ITSTATE, TRI))
6396     return outliner::InstrType::Illegal;
6397 
6398   // Don't outline positions.
6399   if (MI.isPosition())
6400     return outliner::InstrType::Illegal;
6401 
6402   return outliner::InstrType::Legal;
6403 }
6404 
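/// After LR has been spilled in the outlined frame, walk the block and
/// rewrite every SP-relative offset to account for the extra stack slot (one
/// stack alignment's worth of bytes).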
6405 void ARMBaseInstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
6406   for (MachineInstr &MI : MBB) {
6407     checkAndUpdateStackOffset(&MI, Subtarget.getStackAlignment().value(), true);
6408   }
6409 }
6410 
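/// Emit a pre-indexed store that pushes LR (and, when \p Auth is set, the
/// return-address PAC computed into R12 by a preceding PAC instruction) below
/// SP, followed by CFI directives describing the new save locations when
/// \p CFI is set.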
6411 void ARMBaseInstrInfo::saveLROnStack(MachineBasicBlock &MBB,
6412                                      MachineBasicBlock::iterator It, bool CFI,
6413                                      bool Auth) const {
6414   int Align = std::max(Subtarget.getStackAlignment().value(), uint64_t(8));
6415   assert(Align >= 8 && Align <= 256);
6416   if (Auth) {
6417     assert(Subtarget.isThumb2());
6418     // Compute PAC in R12. Outlining ensures R12 is dead across the outlined
6419     // sequence.
6420     BuildMI(MBB, It, DebugLoc(), get(ARM::t2PAC))
6421         .setMIFlags(MachineInstr::FrameSetup);
6422     BuildMI(MBB, It, DebugLoc(), get(ARM::t2STRD_PRE), ARM::SP)
6423         .addReg(ARM::R12, RegState::Kill)
6424         .addReg(ARM::LR, RegState::Kill)
6425         .addReg(ARM::SP)
6426         .addImm(-Align)
6427         .add(predOps(ARMCC::AL))
6428         .setMIFlags(MachineInstr::FrameSetup);
6429   } else {
6430     unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6431     BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::SP)
6432         .addReg(ARM::LR, RegState::Kill)
6433         .addReg(ARM::SP)
6434         .addImm(-Align)
6435         .add(predOps(ARMCC::AL))
6436         .setMIFlags(MachineInstr::FrameSetup);
6437   }
6438 
6439   if (!CFI)
6440     return;
6441 
6442   MachineFunction &MF = *MBB.getParent();
6443 
6444   // Add a CFI, saying CFA is offset by Align bytes from SP.
6445   int64_t StackPosEntry =
6446       MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, Align));
6447   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6448       .addCFIIndex(StackPosEntry)
6449       .setMIFlags(MachineInstr::FrameSetup);
6450 
6451   // Add a CFI saying that the LR that we want to find is now higher than
6452   // before.
6453   int LROffset = Auth ? Align - 4 : Align;
6454   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6455   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6456   int64_t LRPosEntry = MF.addFrameInst(
6457       MCCFIInstruction::createOffset(nullptr, DwarfLR, -LROffset));
6458   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6459       .addCFIIndex(LRPosEntry)
6460       .setMIFlags(MachineInstr::FrameSetup);
6461   if (Auth) {
6462     // Add a CFI for the location of the return address PAC.
6463     unsigned DwarfRAC = MRI->getDwarfRegNum(ARM::RA_AUTH_CODE, true);
6464     int64_t RACPosEntry = MF.addFrameInst(
6465         MCCFIInstruction::createOffset(nullptr, DwarfRAC, -Align));
6466     BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6467         .addCFIIndex(RACPosEntry)
6468         .setMIFlags(MachineInstr::FrameSetup);
6469   }
6470 }
6471 
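/// Emit a CFI directive recording that the caller's LR is currently held in
/// \p Reg rather than on the stack.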
6472 void ARMBaseInstrInfo::emitCFIForLRSaveToReg(MachineBasicBlock &MBB,
6473                                              MachineBasicBlock::iterator It,
6474                                              Register Reg) const {
6475   MachineFunction &MF = *MBB.getParent();
6476   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6477   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6478   unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
6479 
6480   int64_t LRPosEntry = MF.addFrameInst(
6481       MCCFIInstruction::createRegister(nullptr, DwarfLR, DwarfReg));
6482   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6483       .addCFIIndex(LRPosEntry)
6484       .setMIFlags(MachineInstr::FrameSetup);
6485 }
6486 
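/// Emit the post-indexed load that pops LR (and the return-address PAC into
/// R12 when \p Auth is set) back off the stack, the matching CFI directives
/// when \p CFI is set, and finally an AUT instruction to authenticate the
/// return address in the \p Auth case.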
6487 void ARMBaseInstrInfo::restoreLRFromStack(MachineBasicBlock &MBB,
6488                                           MachineBasicBlock::iterator It,
6489                                           bool CFI, bool Auth) const {
6490   int Align = Subtarget.getStackAlignment().value();
6491   if (Auth) {
6492     assert(Subtarget.isThumb2());
6493     // Restore return address PAC and LR.
6494     BuildMI(MBB, It, DebugLoc(), get(ARM::t2LDRD_POST))
6495         .addReg(ARM::R12, RegState::Define)
6496         .addReg(ARM::LR, RegState::Define)
6497         .addReg(ARM::SP, RegState::Define)
6498         .addReg(ARM::SP)
6499         .addImm(Align)
6500         .add(predOps(ARMCC::AL))
6501         .setMIFlags(MachineInstr::FrameDestroy);
6502     // LR authentication is after the CFI instructions, below.
6503   } else {
6504     unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6505     MachineInstrBuilder MIB = BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::LR)
6506                                   .addReg(ARM::SP, RegState::Define)
6507                                   .addReg(ARM::SP);
6508     if (!Subtarget.isThumb())
6509       MIB.addReg(0);
6510     MIB.addImm(Subtarget.getStackAlignment().value())
6511         .add(predOps(ARMCC::AL))
6512         .setMIFlags(MachineInstr::FrameDestroy);
6513   }
6514 
6515   if (CFI) {
6516     // Now stack has moved back up...
6517     MachineFunction &MF = *MBB.getParent();
6518     const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6519     unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6520     int64_t StackPosEntry =
6521         MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0));
6522     BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6523         .addCFIIndex(StackPosEntry)
6524         .setMIFlags(MachineInstr::FrameDestroy);
6525 
6526     // ... and we have restored LR.
6527     int64_t LRPosEntry =
6528         MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, DwarfLR));
6529     BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6530         .addCFIIndex(LRPosEntry)
6531         .setMIFlags(MachineInstr::FrameDestroy);
6532 
6533     if (Auth) {
6534       unsigned DwarfRAC = MRI->getDwarfRegNum(ARM::RA_AUTH_CODE, true);
6535       int64_t Entry =
6536           MF.addFrameInst(MCCFIInstruction::createUndefined(nullptr, DwarfRAC));
6537       BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6538           .addCFIIndex(Entry)
6539           .setMIFlags(MachineInstr::FrameDestroy);
6540     }
6541   }
6542 
6543   if (Auth)
6544     BuildMI(MBB, It, DebugLoc(), get(ARM::t2AUT));
6545 }
6546 
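/// Emit a CFI restore for LR once its value has been moved back out of the
/// scratch register.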
6547 void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg(
6548     MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
6549   MachineFunction &MF = *MBB.getParent();
6550   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6551   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6552 
6553   int64_t LRPosEntry =
6554       MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, DwarfLR));
6555   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6556       .addCFIIndex(LRPosEntry)
6557       .setMIFlags(MachineInstr::FrameDestroy);
6558 }
6559 
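/// Materialize the frame of an outlined function: rewrite a trailing call
/// into a tail call for the thunk case, save/restore LR (with optional return
/// address signing) around any non-tail calls in the body, fix up SP-relative
/// offsets if the stack was touched, and append a return unless the function
/// ends in a tail call.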
6560 void ARMBaseInstrInfo::buildOutlinedFrame(
6561     MachineBasicBlock &MBB, MachineFunction &MF,
6562     const outliner::OutlinedFunction &OF) const {
6563   // For thunk outlining, rewrite the last instruction from a call to a
6564   // tail-call.
6565   if (OF.FrameConstructionID == MachineOutlinerThunk) {
6566     MachineInstr *Call = &*--MBB.instr_end();
6567     bool isThumb = Subtarget.isThumb();
6568     unsigned FuncOp = isThumb ? 2 : 0;
6569     unsigned Opc = Call->getOperand(FuncOp).isReg()
6570                        ? isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6571                        : isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
6572                                                              : ARM::tTAILJMPdND
6573                                  : ARM::TAILJMPd;
6574     MachineInstrBuilder MIB = BuildMI(MBB, MBB.end(), DebugLoc(), get(Opc))
6575                                   .add(Call->getOperand(FuncOp));
6576     if (isThumb && !Call->getOperand(FuncOp).isReg())
6577       MIB.add(predOps(ARMCC::AL));
6578     Call->eraseFromParent();
6579   }
6580 
6581   // Is there a call in the outlined range?
6582   auto IsNonTailCall = [](MachineInstr &MI) {
6583     return MI.isCall() && !MI.isReturn();
6584   };
6585   if (llvm::any_of(MBB.instrs(), IsNonTailCall)) {
6586     MachineBasicBlock::iterator It = MBB.begin();
6587     MachineBasicBlock::iterator Et = MBB.end();
6588 
6589     if (OF.FrameConstructionID == MachineOutlinerTailCall ||
6590         OF.FrameConstructionID == MachineOutlinerThunk)
6591       Et = std::prev(MBB.end());
6592 
6593     // We have to save and restore LR, so we need to add it to the liveins if it
6594     // is not already part of the set.  This is sufficient since outlined
6595     // functions only have one block.
6596     if (!MBB.isLiveIn(ARM::LR))
6597       MBB.addLiveIn(ARM::LR);
6598 
6599     // Insert a save before the outlined region
6600     bool Auth = OF.Candidates.front()
6601                     .getMF()
6602                     ->getInfo<ARMFunctionInfo>()
6603                     ->shouldSignReturnAddress(true);
6604     saveLROnStack(MBB, It, true, Auth);
6605 
6606     // Fix up the instructions in the range, since we're going to modify the
6607     // stack.
6608     assert(OF.FrameConstructionID != MachineOutlinerDefault &&
6609            "Can only fix up stack references once");
6610     fixupPostOutline(MBB);
6611 
6612     // Insert a restore before the terminator for the function.  Restore LR.
6613     restoreLRFromStack(MBB, Et, true, Auth);
6614   }
6615 
6616   // If this is a tail call outlined function, then there's already a return.
6617   if (OF.FrameConstructionID == MachineOutlinerTailCall ||
6618       OF.FrameConstructionID == MachineOutlinerThunk)
6619     return;
6620 
6621   // Here we have to insert the return ourselves.  Get the correct opcode from
6622   // the current feature set.
6623   BuildMI(MBB, MBB.end(), DebugLoc(), get(Subtarget.getReturnOpcode()))
6624       .add(predOps(ARMCC::AL));
6625 
6626   // Did we have to modify the stack by saving the link register?
6627   if (OF.FrameConstructionID != MachineOutlinerDefault &&
6628       OF.Candidates[0].CallConstructionID != MachineOutlinerDefault)
6629     return;
6630 
6631   // We modified the stack.
6632   // Walk over the basic block and fix up all the stack accesses.
6633   fixupPostOutline(MBB);
6634 }
6635 
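/// Insert the call (or tail-call branch) to the outlined function at \p It,
/// wrapping it in an LR save/restore to a scratch register or to the stack as
/// dictated by the candidate's CallConstructionID, and emitting CFI only when
/// LR is not already spilled by the surrounding function.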
6636 MachineBasicBlock::iterator ARMBaseInstrInfo::insertOutlinedCall(
6637     Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
6638     MachineFunction &MF, const outliner::Candidate &C) const {
6639   MachineInstrBuilder MIB;
6640   MachineBasicBlock::iterator CallPt;
6641   unsigned Opc;
6642   bool isThumb = Subtarget.isThumb();
6643 
6644   // Are we tail calling?
6645   if (C.CallConstructionID == MachineOutlinerTailCall) {
6646     // If yes, then we can just branch to the label.
6647     Opc = isThumb
6648               ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6649               : ARM::TAILJMPd;
6650     MIB = BuildMI(MF, DebugLoc(), get(Opc))
6651               .addGlobalAddress(M.getNamedValue(MF.getName()));
6652     if (isThumb)
6653       MIB.add(predOps(ARMCC::AL));
6654     It = MBB.insert(It, MIB);
6655     return It;
6656   }
6657 
6658   // Create the call instruction.
6659   Opc = isThumb ? ARM::tBL : ARM::BL;
6660   MachineInstrBuilder CallMIB = BuildMI(MF, DebugLoc(), get(Opc));
6661   if (isThumb)
6662     CallMIB.add(predOps(ARMCC::AL));
6663   CallMIB.addGlobalAddress(M.getNamedValue(MF.getName()));
6664 
6665   if (C.CallConstructionID == MachineOutlinerNoLRSave ||
6666       C.CallConstructionID == MachineOutlinerThunk) {
6667     // No LR save is needed, so just insert the call.
6668     It = MBB.insert(It, CallMIB);
6669     return It;
6670   }
6671 
6672   const ARMFunctionInfo &AFI = *C.getMF()->getInfo<ARMFunctionInfo>();
6673   // Can we save to a register?
6674   if (C.CallConstructionID == MachineOutlinerRegSave) {
6675     unsigned Reg = findRegisterToSaveLRTo(C);
6676     assert(Reg != 0 && "No callee-saved register available?");
6677 
6678     // Save and restore LR from that register.
6679     copyPhysReg(MBB, It, DebugLoc(), Reg, ARM::LR, true);
6680     if (!AFI.isLRSpilled())
6681       emitCFIForLRSaveToReg(MBB, It, Reg);
6682     CallPt = MBB.insert(It, CallMIB);
6683     copyPhysReg(MBB, It, DebugLoc(), ARM::LR, Reg, true);
6684     if (!AFI.isLRSpilled())
6685       emitCFIForLRRestoreFromReg(MBB, It);
6686     It--;
6687     return CallPt;
6688   }
6689   // We have the default case. Save and restore from SP.
6690   if (!MBB.isLiveIn(ARM::LR))
6691     MBB.addLiveIn(ARM::LR);
6692   bool Auth = !AFI.isLRSpilled() && AFI.shouldSignReturnAddress(true);
6693   saveLROnStack(MBB, It, !AFI.isLRSpilled(), Auth);
6694   CallPt = MBB.insert(It, CallMIB);
6695   restoreLRFromStack(MBB, It, !AFI.isLRSpilled(), Auth);
6696   It--;
6697   return CallPt;
6698 }
6699 
6700 bool ARMBaseInstrInfo::shouldOutlineFromFunctionByDefault(
6701     MachineFunction &MF) const {
6702   return Subtarget.isMClass() && MF.getFunction().hasMinSize();
6703 }
6704 
6705 bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
6706                                                          AAResults *AA) const {
6707   // Try hard to rematerialize any VCTPs because if we spill P0, it will block
6708   // the tail predication conversion. This means that the element count
6709   // register has to be live for longer, but that has to be better than
6710   // spill/restore and VPT predication.
6711   return isVCTP(&MI) && !isPredicated(MI);
6712 }
6713 
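// Opcode selection helpers: when SLS hardening of indirect calls
// (hardenSlsBlr) is enabled for the subtarget, use the "_noip" variants of
// the call opcodes.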
6714 unsigned llvm::getBLXOpcode(const MachineFunction &MF) {
6715   return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::BLX_noip
6716                                                           : ARM::BLX;
6717 }
6718 
6719 unsigned llvm::gettBLXrOpcode(const MachineFunction &MF) {
6720   return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::tBLXr_noip
6721                                                           : ARM::tBLXr;
6722 }
6723 
6724 unsigned llvm::getBLXpredOpcode(const MachineFunction &MF) {
6725   return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::BLX_pred_noip
6726                                                           : ARM::BLX_pred;
6727 }
6728 
6729