//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

static cl::opt<unsigned> TBZDisplacementBits(
    "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned> CBZDisplacementBits(
    "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
    BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                        cl::desc("Restrict range of Bcc instructions (DEBUG)"));

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP,
                          AArch64::CATCHRET),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may occupy. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  {
    auto Op = MI.getOpcode();
    if (Op == AArch64::INLINEASM || Op == AArch64::INLINEASM_BR)
      return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
  }

  // Meta-instructions emit no code.
  if (MI.isMetaInstruction())
    return 0;

  // FIXME: We currently only handle pseudoinstructions that don't get expanded
  //        before the assembly printer.
  unsigned NumBytes = 0;
  const MCInstrDesc &Desc = MI.getDesc();

  // The size should preferably be set in
  // llvm/lib/Target/AArch64/AArch64InstrInfo.td (the default case below).
  // The specific cases here handle instructions of variable size.
  switch (Desc.getOpcode()) {
  default:
    if (Desc.getSize())
      return Desc.getSize();

    // Anything not explicitly designated otherwise (i.e. pseudo-instructions
    // with fixed constant size but not specified in .td file) is a normal
    // 4-byte insn.
    NumBytes = 4;
    break;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its shadow
    NumBytes = StackMapOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested
    NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::STATEPOINT:
    NumBytes = StatepointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    // No patch bytes means a normal call inst is emitted
    if (NumBytes == 0)
      NumBytes = 4;
    break;
  case AArch64::SPACE:
    NumBytes = MI.getOperand(1).getImm();
    break;
  case TargetOpcode::BUNDLE:
    NumBytes = getInstBundleLength(MI);
    break;
  }

  return NumBytes;
}

unsigned AArch64InstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}
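
// For illustration, the Cond encodings produced above (a sketch with
// hypothetical operands):
//   b.ne %bb.1         -> Cond = { NE }
//   cbz  w0, %bb.1     -> Cond = { -1, CBZW, w0 }
//   tbnz x2, #3, %bb.1 -> Cond = { -1, TBNZX, x2, 3 }
// The leading -1 distinguishes a folded compare-and-branch from a plain Bcc,
// whose condition code is stored directly in Cond[0].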

static unsigned getBranchDisplacementBits(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return 64;
  case AArch64::TBNZW:
  case AArch64::TBZW:
  case AArch64::TBNZX:
  case AArch64::TBZX:
    return TBZDisplacementBits;
  case AArch64::CBNZW:
  case AArch64::CBZW:
  case AArch64::CBNZX:
  case AArch64::CBZX:
    return CBZDisplacementBits;
  case AArch64::Bcc:
    return BCCDisplacementBits;
  }
}

bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {
  unsigned Bits = getBranchDisplacementBits(BranchOp);
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
  return isIntN(Bits, BrOffset / 4);
}
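
// For example, with the defaults above a Bcc carries a 19-bit signed
// displacement counted in 4-byte units, i.e. roughly +/-1 MiB of reach,
// while TB[N]Z carries 14 bits, roughly +/-32 KiB.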

MachineBasicBlock *
AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return MI.getOperand(0).getMBB();
  case AArch64::TBZW:
  case AArch64::TBNZW:
  case AArch64::TBZX:
  case AArch64::TBNZX:
    return MI.getOperand(2).getMBB();
  case AArch64::CBZW:
  case AArch64::CBNZW:
  case AArch64::CBZX:
  case AArch64::CBNZX:
  case AArch64::Bcc:
    return MI.getOperand(1).getMBB();
  }
}

// Branch analysis.
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // Skip over SpeculationBarrierEndBB terminators
  if (I->getOpcode() == AArch64::SpeculationBarrierISBDSBEndBB ||
      I->getOpcode() == AArch64::SpeculationBarrierSBEndBB) {
    --I;
  }

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If we're allowed to modify and the block ends in an unconditional branch
  // which could simply fall through, remove the branch.  (Note: This case only
  // matters when we can't understand the whole sequence, otherwise it's also
  // handled by BranchFolding.cpp.)
  if (AllowModify && isUncondBranchOpcode(LastOpc) &&
      MBB.isLayoutSuccessor(getBranchDestBlock(*LastInst))) {
    LastInst->eraseFromParent();
    LastInst = SecondLastInst;
    LastOpc = LastInst->getOpcode();
    if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
      assert(!isUncondBranchOpcode(LastOpc) &&
             "unreachable unconditional branches removed above");

      if (isCondBranchOpcode(LastOpc)) {
        // Block ends with fall-through condbranch.
        parseCondBranch(LastInst, TBB, Cond);
        return false;
      }
      return true; // Can't handle indirect branch.
    } else {
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
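
// A sketch of analyzeBranch's contract on some hypothetical blocks:
//   ... b %bb.7                       -> TBB = %bb.7, Cond empty, returns false
//   ... cbz w0, %bb.3 (falls through) -> TBB = %bb.3, Cond = {-1, CBZW, w0}
//   ... cbz w0, %bb.3 ; b %bb.5       -> TBB = %bb.3, FBB = %bb.5, returns false
//   ... br x1                         -> returns true (cannot analyze)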

bool AArch64InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
                                              MachineBranchPredicate &MBP,
                                              bool AllowModify) const {
  // For the moment, handle only a block which ends with a cb(n)zx followed by
  // a fallthrough.  Why this?  Because it is a common form.
  // TODO: Should we handle b.cc?

  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return true;

  // Skip over SpeculationBarrierEndBB terminators
  if (I->getOpcode() == AArch64::SpeculationBarrierISBDSBEndBB ||
      I->getOpcode() == AArch64::SpeculationBarrierSBEndBB) {
    --I;
  }

  if (!isUnpredicatedTerminator(*I))
    return true;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();
  if (!isCondBranchOpcode(LastOpc))
    return true;

  switch (LastOpc) {
  default:
    return true;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    break;
  }

  MBP.TrueDest = LastInst->getOperand(1).getMBB();
  assert(MBP.TrueDest && "expected!");
  MBP.FalseDest = MBB.getNextNode();

  MBP.ConditionDef = nullptr;
  MBP.SingleUseCondition = false;

  MBP.LHS = LastInst->getOperand(0);
  MBP.RHS = MachineOperand::CreateImm(0);
  MBP.Predicate = LastOpc == AArch64::CBNZX ? MachineBranchPredicate::PRED_NE
                                            : MachineBranchPredicate::PRED_EQ;
  return false;
}
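
// E.g. for a block ending in "cbnz x0, %bb.1" that falls through to %bb.2,
// this yields LHS = x0, RHS = #0, Predicate = PRED_NE, TrueDest = %bb.1 and
// FalseDest = %bb.2 (hypothetical operands, for illustration).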

bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }
  --I;
  if (!isCondBranchOpcode(I->getOpcode())) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  if (BytesRemoved)
    *BytesRemoved = 8;

  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);

    if (BytesAdded)
      *BytesAdded = 4;

    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}
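
// For instance, a two-way branch inserted with Cond = { NE } comes out as
//   b.ne TBB
//   b    FBB
// and is reported as 2 instructions (8 bytes via BytesAdded).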

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (Register::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!Register::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
    LLVM_FALLTHROUGH;
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
    LLVM_FALLTHROUGH;
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}
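
// Illustrative foldings performed above (a sketch with virtual registers):
//   %v = ADDWri %x, 1, 0    -> CSINCWr, NewVReg = %x   (select-increment)
//   %v = ORNXrr xzr, %x     -> CSINVXr, NewVReg = %x   (select-invert)
//   %v = SUBWrr wzr, %x     -> CSNEGWr, NewVReg = %x   (select-negate)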

bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Cond,
                                       Register DstReg, Register TrueReg,
                                       Register FalseReg, int &CondCycles,
                                       int &TrueCycles,
                                       int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Also need to check the dest regclass, in case we're trying to optimize
  // something like:
  // %1(gpr) = PHI %2(fpr), bb1, %(fpr), bb2
  if (!RI.getCommonSubClass(RC, MRI.getRegClass(DstReg)))
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, Register DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    Register TrueReg, Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    Register SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // tst reg, #(1 << foo) is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}
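
// E.g. a select on Cond = { -1, CBZW, %c } between %t and %f expands to
//   subs wzr, %c, #0
//   csel %dst, %t, %f, eq
// (a sketch; the csel becomes csinc/csinv/csneg when a fold applies above).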

/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
  uint64_t Imm = MI.getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}
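
// For example, 0x00ff00ff is a valid 32-bit logical immediate, so
// "MOVi32imm #0x00ff00ff" can become "orr wN, wzr, #0x00ff00ff", whereas
// 0x12345678 is not and needs a movz/movk sequence instead.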

// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in the future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  if (!Subtarget.hasCustomCheapAsMoveHandling())
    return MI.isAsCheapAsAMove();

  const unsigned Opcode = MI.getOpcode();

  // Firstly, check cases gated by features.

  if (Subtarget.hasZeroCycleZeroingFP()) {
    if (Opcode == AArch64::FMOVH0 ||
        Opcode == AArch64::FMOVS0 ||
        Opcode == AArch64::FMOVD0)
      return true;
  }

  if (Subtarget.hasZeroCycleZeroingGP()) {
    if (Opcode == TargetOpcode::COPY &&
        (MI.getOperand(1).getReg() == AArch64::WZR ||
         MI.getOperand(1).getReg() == AArch64::XZR))
      return true;
  }

  // Secondly, check cases specific to sub-targets.

  if (Subtarget.hasExynosCheapAsMoveHandling()) {
    if (isExynosCheapAsMove(MI))
      return true;

    return MI.isAsCheapAsAMove();
  }

  // Finally, check generic cases.

  switch (Opcode) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI.getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;

  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    if (ShiftVal == 0)
      return true;
    return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;
  }

  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) <= 4;
    }
  }

  case AArch64::SUBWrs:
  case AArch64::SUBSWrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);
  }

  case AArch64::SUBXrs:
  case AArch64::SUBSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);
  }

  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) == 0;
    }
  }

  case AArch64::LDRBBroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroW:
  case AArch64::LDRBroX:
  case AArch64::LDRDroW:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroW:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroW:
  case AArch64::LDRHroX:
  case AArch64::LDRQroW:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroW:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroW:
  case AArch64::LDRSroX:
  case AArch64::LDRWroW:
  case AArch64::LDRWroX:
  case AArch64::LDRXroW:
  case AArch64::LDRXroX:
  case AArch64::PRFMroW:
  case AArch64::PRFMroX:
  case AArch64::STRBBroW:
  case AArch64::STRBBroX:
  case AArch64::STRBroW:
  case AArch64::STRBroX:
  case AArch64::STRDroW:
  case AArch64::STRDroX:
  case AArch64::STRHHroW:
  case AArch64::STRHHroX:
  case AArch64::STRHroW:
  case AArch64::STRHroX:
  case AArch64::STRQroW:
  case AArch64::STRQroX:
  case AArch64::STRSroW:
  case AArch64::STRSroX:
  case AArch64::STRWroW:
  case AArch64::STRWroX:
  case AArch64::STRXroW:
  case AArch64::STRXroX: {
    unsigned IsSigned = MI.getOperand(3).getImm();
    return !IsSigned;
  }
  }
}

bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
    default:
      return false;
    case AArch64::SEH_StackAlloc:
    case AArch64::SEH_SaveFPLR:
    case AArch64::SEH_SaveFPLR_X:
    case AArch64::SEH_SaveReg:
    case AArch64::SEH_SaveReg_X:
    case AArch64::SEH_SaveRegP:
    case AArch64::SEH_SaveRegP_X:
    case AArch64::SEH_SaveFReg:
    case AArch64::SEH_SaveFReg_X:
    case AArch64::SEH_SaveFRegP:
    case AArch64::SEH_SaveFRegP_X:
    case AArch64::SEH_SetFP:
    case AArch64::SEH_AddFP:
    case AArch64::SEH_Nop:
    case AArch64::SEH_PrologEnd:
    case AArch64::SEH_EpilogStart:
    case AArch64::SEH_EpilogEnd:
      return true;
  }
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             Register &SrcReg, Register &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;
  bool OffsetAIsScalable = false, OffsetBIsScalable = false;

  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base, the offset from the base, and the width. The width is
  // the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If the
  // bases are identical, and the offset of the lower memory access plus its
  // width does not overlap the offset of the higher memory access, then the
  // memory accesses are different. If OffsetAIsScalable and OffsetBIsScalable
  // are both true, they are assumed to have the same scale (vscale).
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, OffsetAIsScalable,
                                   WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, OffsetBIsScalable,
                                   WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB) &&
        OffsetAIsScalable == OffsetBIsScalable) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
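
// For example, "str w0, [x1]" (width 4, offset 0) and "str w2, [x1, #4]"
// share the base x1 and the lower access ends exactly where the higher one
// begins, so they are reported disjoint; with offsets 0 and 2 they would
// overlap and the function would return false.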

bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
    return true;
  switch (MI.getOpcode()) {
  case AArch64::HINT:
    // CSDB hints are scheduling barriers.
    if (MI.getOperand(0).getImm() == 0x14)
      return true;
    break;
  case AArch64::DSB:
  case AArch64::ISB:
    // DSB and ISB also are scheduling barriers.
    return true;
  default:;
  }
  return isSEHInstruction(MI);
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                      Register &SrcReg2, int64_t &CmpMask,
                                      int64_t &CmpValue) const {
  // The first operand can be a frame index where we'd normally expect a
  // register.
  assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands");
  if (!MI.getOperand(1).isReg())
    return false;

  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::PTEST_PP:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = MI.getOperand(1).getReg();
    // Not sure about the mask and value for now...
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI.getOperand(2).getImm();
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = AArch64_AM::decodeLogicalImmediate(
                   MI.getOperand(2).getImm(),
                   MI.getOpcode() == AArch64::ANDSWri ? 32 : 64);
    return true;
  }

  return false;
}
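
// E.g. "subs wzr, w0, #42" (SUBSWri) yields SrcReg = w0, SrcReg2 = 0 and
// CmpValue = 42, while "ands wzr, w0, #0xff" (ANDSWri) decodes the logical
// immediate first, so CmpValue = 255 (hypothetical operands).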

static bool UpdateOperandRegClass(MachineInstr &Instr) {
  MachineBasicBlock *MBB = Instr.getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr.getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr.getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI.getOpcode()) {
  default:
    return MI.getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}
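
// For example, ADDSWrr always becomes ADDWrr, but SUBSWri is only rewritten
// to SUBWri when the instruction does not define wzr/xzr: for a compare such
// as "subs wzr, w0, #1", dropping the S would re-encode the zero register
// as sp.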

enum AccessKind { AK_Write = 0x01, AK_Read = 0x10, AK_All = 0x11 };

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are in different blocks it is assumed the condition
///       flags are accessed on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
  // Early exit if To is at the beginning of the BB.
  if (To == To->getParent()->begin())
    return true;

  // Check whether the instructions are in the same basic block
  // If not, assume the condition flags might get modified somewhere.
  if (To->getParent() != From->getParent())
    return true;

  // From must be above To.
  assert(std::any_of(
      ++To.getReverse(), To->getParent()->rend(),
      [From](MachineInstr &MI) { return MI.getIterator() == From; }));

  // We iterate backward starting at \p To until we hit \p From.
  for (const MachineInstr &Instr :
       instructionsWithoutDebug(++To.getReverse(), From.getReverse())) {
    if (((AccessToCheck & AK_Write) &&
         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
      return true;
  }
  return false;
}

/// optimizePTestInstr - Attempt to remove a ptest of a predicate-generating
/// operation which could set the flags in an identical manner
bool AArch64InstrInfo::optimizePTestInstr(
    MachineInstr *PTest, unsigned MaskReg, unsigned PredReg,
    const MachineRegisterInfo *MRI) const {
  auto *Mask = MRI->getUniqueVRegDef(MaskReg);
  auto *Pred = MRI->getUniqueVRegDef(PredReg);
  auto NewOp = Pred->getOpcode();
  bool OpChanged = false;

  unsigned MaskOpcode = Mask->getOpcode();
  unsigned PredOpcode = Pred->getOpcode();
  bool PredIsPTestLike = isPTestLikeOpcode(PredOpcode);
  bool PredIsWhileLike = isWhileOpcode(PredOpcode);

  if (isPTrueOpcode(MaskOpcode) && (PredIsPTestLike || PredIsWhileLike)) {
    // For PTEST(PTRUE, OTHER_INST), PTEST is redundant when PTRUE doesn't
    // deactivate any lanes OTHER_INST might set.
    uint64_t MaskElementSize = getElementSizeForOpcode(MaskOpcode);
    uint64_t PredElementSize = getElementSizeForOpcode(PredOpcode);

    // Must be an all active predicate of matching element size.
    if ((PredElementSize != MaskElementSize) ||
        (Mask->getOperand(1).getImm() != 31))
      return false;

    // Fallthrough to simply remove the PTEST.
  } else if ((Mask == Pred) && (PredIsPTestLike || PredIsWhileLike)) {
    // For PTEST(PG, PG), PTEST is redundant when PG is the result of an
    // instruction that sets the flags as PTEST would.

    // Fallthrough to simply remove the PTEST.
  } else if (PredIsPTestLike) {
    // For PTEST(PG_1, PTEST_LIKE(PG2, ...)), PTEST is redundant when both
    // instructions use the same predicate.
    auto PTestLikeMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg());
    if (Mask != PTestLikeMask)
      return false;

    // Fallthrough to simply remove the PTEST.
  } else {
    switch (Pred->getOpcode()) {
    case AArch64::BRKB_PPzP:
    case AArch64::BRKPB_PPzPP: {
      // Op 0 is chain, 1 is the mask, 2 the previous predicate to
      // propagate, 3 the new predicate.

      // Check to see if our mask is the same as the brkpb's. If
      // not the resulting flag bits may be different and we
      // can't remove the ptest.
      auto *PredMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg());
      if (Mask != PredMask)
        return false;

      // Switch to the new opcode
      NewOp = Pred->getOpcode() == AArch64::BRKB_PPzP ? AArch64::BRKBS_PPzP
                                                      : AArch64::BRKPBS_PPzPP;
      OpChanged = true;
      break;
    }
    case AArch64::BRKN_PPzP: {
      auto *PredMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg());
      if (Mask != PredMask)
        return false;

      NewOp = AArch64::BRKNS_PPzP;
      OpChanged = true;
      break;
    }
    case AArch64::RDFFR_PPz: {
      // rdffr   p1.b, PredMask=p0/z <--- Definition of Pred
      // ptest   Mask=p0, Pred=p1.b  <--- If equal masks, remove this and use
      //                                  `rdffrs p1.b, p0/z` above.
      auto *PredMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg());
      if (Mask != PredMask)
        return false;

      NewOp = AArch64::RDFFRS_PPz;
      OpChanged = true;
      break;
    }
    default:
      // Bail out if we don't recognize the input
      return false;
    }
  }

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  // If another instruction between Pred and PTest accesses flags, don't remove
  // the ptest or update the earlier instruction to modify them.
  if (areCFlagsAccessedBetweenInstrs(Pred, PTest, TRI))
    return false;

  // If we pass all the checks, it's safe to remove the PTEST and use the flags
  // as they are prior to PTEST. Sometimes this requires the tested PTEST
  // operand to be replaced with an equivalent instruction that also sets the
  // flags.
  Pred->setDesc(get(NewOp));
  PTest->eraseFromParent();
  if (OpChanged) {
    bool succeeded = UpdateOperandRegClass(*Pred);
    (void)succeeded;
    assert(succeeded && "Operands have incompatible register classes!");
    Pred->addRegisterDefined(AArch64::NZCV, TRI);
  }

  // Ensure that the flags def is live.
  if (Pred->registerDefIsDead(AArch64::NZCV, TRI)) {
    unsigned i = 0, e = Pred->getNumOperands();
    for (; i != e; ++i) {
      MachineOperand &MO = Pred->getOperand(i);
      if (MO.isReg() && MO.isDef() && MO.getReg() == AArch64::NZCV) {
        MO.setIsDead(false);
        break;
      }
    }
  }
  return true;
}
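
// A sketch of the rewrite above for the RDFFR_PPz case (virtual registers):
//   %1 = RDFFR_PPz %0     becomes   %1 = RDFFRS_PPz %0   (now defines NZCV)
//   PTEST_PP %0, %1       becomes   (erased)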

/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It can be treated as a true
/// compare instruction when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if there is an instruction above it that produces a
///    needed condition code or that can be converted into such an
///    instruction.
///    Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask,
    int64_t CmpValue, const MachineRegisterInfo *MRI) const {
  assert(CmpInstr.getParent());
  assert(MRI);

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (DeadNZCVIdx != -1) {
    if (CmpInstr.definesRegister(AArch64::WZR) ||
        CmpInstr.definesRegister(AArch64::XZR)) {
      CmpInstr.eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr.getOpcode();
    unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr.setDesc(MCID);
    CmpInstr.RemoveOperand(DeadNZCVIdx);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  if (CmpInstr.getOpcode() == AArch64::PTEST_PP)
    return optimizePTestInstr(&CmpInstr, SrcReg, SrcReg2, MRI);

  if (SrcReg2 != 0)
    return false;

  // CmpInstr is a compare instruction if its destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
    return false;

  if (CmpValue == 0 && substituteCmpToZero(CmpInstr, SrcReg, *MRI))
    return true;
  return (CmpValue == 0 || CmpValue == 1) &&
         removeCmpToZeroOrOne(CmpInstr, SrcReg, CmpValue, *MRI);
}

/// Get the opcode of the S (flag-setting) version of Instr.
/// If Instr is already an S version, its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have an S
/// version or we are not interested in one.
static unsigned sForm(MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64::INSTRUCTION_LIST_END;

  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    return Instr.getOpcode();

  case AArch64::ADDWrr:
    return AArch64::ADDSWrr;
  case AArch64::ADDWri:
    return AArch64::ADDSWri;
  case AArch64::ADDXrr:
    return AArch64::ADDSXrr;
  case AArch64::ADDXri:
    return AArch64::ADDSXri;
  case AArch64::ADCWr:
    return AArch64::ADCSWr;
  case AArch64::ADCXr:
    return AArch64::ADCSXr;
  case AArch64::SUBWrr:
    return AArch64::SUBSWrr;
  case AArch64::SUBWri:
    return AArch64::SUBSWri;
  case AArch64::SUBXrr:
    return AArch64::SUBSXrr;
  case AArch64::SUBXri:
    return AArch64::SUBSXri;
  case AArch64::SBCWr:
    return AArch64::SBCSWr;
  case AArch64::SBCXr:
    return AArch64::SBCSXr;
  case AArch64::ANDWri:
    return AArch64::ANDSWri;
  case AArch64::ANDXri:
    return AArch64::ANDSXri;
  }
}

/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(const MachineBasicBlock *MBB) {
  for (auto *BB : MBB->successors())
    if (BB->isLiveIn(AArch64::NZCV))
      return true;
  return false;
}

/// \returns The condition code operand index for \p Instr if it is a branch
/// or select and -1 otherwise.
static int
findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return -1;

  case AArch64::Bcc: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 2);
    return Idx - 2;
  }

  case AArch64::CSINVWr:
  case AArch64::CSINVXr:
  case AArch64::CSINCWr:
  case AArch64::CSINCXr:
  case AArch64::CSELWr:
  case AArch64::CSELXr:
  case AArch64::CSNEGWr:
  case AArch64::CSNEGXr:
  case AArch64::FCSELSrrr:
  case AArch64::FCSELDrrr: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 1);
    return Idx - 1;
  }
  }
}

namespace {

struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};

} // end anonymous namespace

/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
  int CCIdx = findCondCodeUseOperandIdxForBranchOrSelect(Instr);
  return CCIdx >= 0 ? static_cast<AArch64CC::CondCode>(
                          Instr.getOperand(CCIdx).getImm())
                    : AArch64CC::Invalid;
}

static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
  assert(CC != AArch64CC::Invalid);
  UsedNZCV UsedFlags;
  switch (CC) {
  default:
    break;

  case AArch64CC::EQ: // Z set
  case AArch64CC::NE: // Z clear
    UsedFlags.Z = true;
    break;

  case AArch64CC::HI: // Z clear and C set
  case AArch64CC::LS: // Z set   or  C clear
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH;
  case AArch64CC::HS: // C set
  case AArch64CC::LO: // C clear
    UsedFlags.C = true;
    break;

  case AArch64CC::MI: // N set
  case AArch64CC::PL: // N clear
    UsedFlags.N = true;
    break;

  case AArch64CC::VS: // V set
  case AArch64CC::VC: // V clear
    UsedFlags.V = true;
    break;

  case AArch64CC::GT: // Z clear, N and V the same
  case AArch64CC::LE: // Z set,   N and V differ
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH;
  case AArch64CC::GE: // N and V the same
  case AArch64CC::LT: // N and V differ
    UsedFlags.N = true;
    UsedFlags.V = true;
    break;
  }
  return UsedFlags;
}
1624 
1625 /// \returns The condition flags used after \p CmpInstr in its MachineBB if
1626 /// they do not include C or V and NZCV is not alive in the successors of the
1627 /// common parent block of \p CmpInstr and \p MI. \returns None otherwise.
1628 ///
1629 /// Collects the instructions using those flags in \p CCUseInstrs if provided.
1630 static Optional<UsedNZCV>
1631 examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
1632                  const TargetRegisterInfo &TRI,
1633                  SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr) {
1634   MachineBasicBlock *CmpParent = CmpInstr.getParent();
1635   if (MI.getParent() != CmpParent)
1636     return None;
1637 
1638   if (areCFlagsAliveInSuccessors(CmpParent))
1639     return None;
1640 
1641   UsedNZCV NZCVUsedAfterCmp;
1642   for (MachineInstr &Instr : instructionsWithoutDebug(
1643            std::next(CmpInstr.getIterator()), CmpParent->instr_end())) {
1644     if (Instr.readsRegister(AArch64::NZCV, &TRI)) {
1645       AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
1646       if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
1647         return None;
1648       NZCVUsedAfterCmp |= getUsedNZCV(CC);
1649       if (CCUseInstrs)
1650         CCUseInstrs->push_back(&Instr);
1651     }
1652     if (Instr.modifiesRegister(AArch64::NZCV, &TRI))
1653       break;
1654   }
1655   if (NZCVUsedAfterCmp.C || NZCVUsedAfterCmp.V)
1656     return None;
1657   return NZCVUsedAfterCmp;
1658 }
1659 
1660 static bool isADDSRegImm(unsigned Opcode) {
1661   return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
1662 }
1663 
1664 static bool isSUBSRegImm(unsigned Opcode) {
1665   return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
1666 }
1667 
1668 /// Check if CmpInstr can be substituted by MI.
1669 ///
1670 /// CmpInstr can be substituted when:
1671 /// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
1672 /// - and, MI and CmpInstr are in the same MachineBB
1673 /// - and, condition flags are not alive in successors of the CmpInstr parent
1674 /// - and, if MI's opcode is the S form, there are no defs of flags between
1675 ///        MI and CmpInstr,
1676 ///        or, if MI's opcode is not the S form, there are neither defs nor
1677 ///        uses of flags between MI and CmpInstr
1678 /// - and, the C and V flags are not used after CmpInstr
1679 static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
1680                                        const TargetRegisterInfo &TRI) {
1681   assert(sForm(MI) != AArch64::INSTRUCTION_LIST_END);
1682 
1683   const unsigned CmpOpcode = CmpInstr.getOpcode();
1684   if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
1685     return false;
1686 
1687   if (!examineCFlagsUse(MI, CmpInstr, TRI))
1688     return false;
1689 
1690   AccessKind AccessToCheck = AK_Write;
1691   if (sForm(MI) != MI.getOpcode())
1692     AccessToCheck = AK_All;
1693   return !areCFlagsAccessedBetweenInstrs(&MI, &CmpInstr, &TRI, AccessToCheck);
1694 }
1695 
1696 /// Substitute an instruction comparing to zero with another instruction
1697 /// which produces the needed condition flags.
1698 ///
1699 /// Return true on success.
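     /// A minimal sketch of the rewrite (MIR-style; the register numbers are
     /// illustrative):
     /// \code
     ///   %8:gpr32 = ADDWrr %0, %1
     ///   dead %9:gpr32 = SUBSWri %8, 0, 0, implicit-def $nzcv   ; cmp w8, #0
     /// \endcode
     /// becomes
     /// \code
     ///   %8:gpr32 = ADDSWrr %0, %1, implicit-def $nzcv
     /// \endcode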
1700 bool AArch64InstrInfo::substituteCmpToZero(
1701     MachineInstr &CmpInstr, unsigned SrcReg,
1702     const MachineRegisterInfo &MRI) const {
1703   // Get the unique definition of SrcReg.
1704   MachineInstr *MI = MRI.getUniqueVRegDef(SrcReg);
1705   if (!MI)
1706     return false;
1707 
1708   const TargetRegisterInfo &TRI = getRegisterInfo();
1709 
1710   unsigned NewOpc = sForm(*MI);
1711   if (NewOpc == AArch64::INSTRUCTION_LIST_END)
1712     return false;
1713 
1714   if (!canInstrSubstituteCmpInstr(*MI, CmpInstr, TRI))
1715     return false;
1716 
1717   // Update the instruction to set NZCV.
1718   MI->setDesc(get(NewOpc));
1719   CmpInstr.eraseFromParent();
1720   bool succeeded = UpdateOperandRegClass(*MI);
1721   (void)succeeded;
1722   assert(succeeded && "Some operands' register classes are incompatible!");
1723   MI->addRegisterDefined(AArch64::NZCV, &TRI);
1724   return true;
1725 }
1726 
1727 /// \returns True if \p CmpInstr can be removed.
1728 ///
1729 /// \p IsInvertCC is true if, after removing \p CmpInstr, condition
1730 /// codes used in \p CCUseInstrs must be inverted.
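     /// Note: 'csinc Rd, zr, zr, <cc>' materializes (<cc> ? 0 : 1), i.e. Rd is
     /// the logical negation of <cc> as a 0/1 value; that is what can make a
     /// subsequent compare of Rd against 0 or 1 redundant.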
1731 static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
1732                                  int CmpValue, const TargetRegisterInfo &TRI,
1733                                  SmallVectorImpl<MachineInstr *> &CCUseInstrs,
1734                                  bool &IsInvertCC) {
1735   assert((CmpValue == 0 || CmpValue == 1) &&
1736          "Only comparisons to 0 or 1 considered for removal!");
1737 
1738   // MI is 'CSINCWr %vreg, wzr, wzr, <cc>' or 'CSINCXr %vreg, xzr, xzr, <cc>'
1739   unsigned MIOpc = MI.getOpcode();
1740   if (MIOpc == AArch64::CSINCWr) {
1741     if (MI.getOperand(1).getReg() != AArch64::WZR ||
1742         MI.getOperand(2).getReg() != AArch64::WZR)
1743       return false;
1744   } else if (MIOpc == AArch64::CSINCXr) {
1745     if (MI.getOperand(1).getReg() != AArch64::XZR ||
1746         MI.getOperand(2).getReg() != AArch64::XZR)
1747       return false;
1748   } else {
1749     return false;
1750   }
1751   AArch64CC::CondCode MICC = findCondCodeUsedByInstr(MI);
1752   if (MICC == AArch64CC::Invalid)
1753     return false;
1754 
1755   // NZCV needs to be defined
1756   if (MI.findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
1757     return false;
1758 
1759   // CmpInstr is 'ADDS %vreg, 0' or 'SUBS %vreg, 0' or 'SUBS %vreg, 1'
1760   const unsigned CmpOpcode = CmpInstr.getOpcode();
1761   bool IsSubsRegImm = isSUBSRegImm(CmpOpcode);
1762   if (CmpValue && !IsSubsRegImm)
1763     return false;
1764   if (!CmpValue && !IsSubsRegImm && !isADDSRegImm(CmpOpcode))
1765     return false;
1766 
1767   // MI conditions allowed: eq, ne, mi, pl
1768   UsedNZCV MIUsedNZCV = getUsedNZCV(MICC);
1769   if (MIUsedNZCV.C || MIUsedNZCV.V)
1770     return false;
1771 
1772   Optional<UsedNZCV> NZCVUsedAfterCmp =
1773       examineCFlagsUse(MI, CmpInstr, TRI, &CCUseInstrs);
1774   // Condition flags are not used in CmpInstr's basic block successors, and
1775   // only the Z or N flags may be used after CmpInstr within its basic block.
1776   if (!NZCVUsedAfterCmp)
1777     return false;
1778   // Z or N flag used after CmpInstr must correspond to the flag used in MI
1779   if ((MIUsedNZCV.Z && NZCVUsedAfterCmp->N) ||
1780       (MIUsedNZCV.N && NZCVUsedAfterCmp->Z))
1781     return false;
1782   // If CmpInstr is a comparison to zero, MI conditions are limited to eq, ne.
1783   if (MIUsedNZCV.N && !CmpValue)
1784     return false;
1785 
1786   // There must be no defs of flags between MI and CmpInstr
1787   if (areCFlagsAccessedBetweenInstrs(&MI, &CmpInstr, &TRI, AK_Write))
1788     return false;
1789 
1790   // Condition code is inverted in the following cases:
1791   // 1. MI condition is ne; CmpInstr is 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
1792   // 2. MI condition is eq, pl; CmpInstr is 'SUBS %vreg, 1'
1793   IsInvertCC = (CmpValue && (MICC == AArch64CC::EQ || MICC == AArch64CC::PL)) ||
1794                (!CmpValue && MICC == AArch64CC::NE);
1795   return true;
1796 }
1797 
1798 /// Remove the comparison in a csinc-cmp sequence.
1799 ///
1800 /// Examples:
1801 /// 1. \code
1802 ///   csinc w9, wzr, wzr, ne
1803 ///   cmp   w9, #0
1804 ///   b.eq
1805 ///    \endcode
1806 /// to
1807 ///    \code
1808 ///   csinc w9, wzr, wzr, ne
1809 ///   b.ne
1810 ///    \endcode
1811 ///
1812 /// 2. \code
1813 ///   csinc x2, xzr, xzr, mi
1814 ///   cmp   x2, #1
1815 ///   b.pl
1816 ///    \endcode
1817 /// to
1818 ///    \code
1819 ///   csinc x2, xzr, xzr, mi
1820 ///   b.pl
1821 ///    \endcode
1822 ///
1823 /// \param  CmpInstr comparison instruction
1824 /// \return True when comparison removed
1825 bool AArch64InstrInfo::removeCmpToZeroOrOne(
1826     MachineInstr &CmpInstr, unsigned SrcReg, int CmpValue,
1827     const MachineRegisterInfo &MRI) const {
1828   MachineInstr *MI = MRI.getUniqueVRegDef(SrcReg);
1829   if (!MI)
1830     return false;
1831   const TargetRegisterInfo &TRI = getRegisterInfo();
1832   SmallVector<MachineInstr *, 4> CCUseInstrs;
1833   bool IsInvertCC = false;
1834   if (!canCmpInstrBeRemoved(*MI, CmpInstr, CmpValue, TRI, CCUseInstrs,
1835                             IsInvertCC))
1836     return false;
1837   // Make transformation
1838   CmpInstr.eraseFromParent();
1839   if (IsInvertCC) {
1840     // Invert condition codes in CmpInstr CC users
1841     for (MachineInstr *CCUseInstr : CCUseInstrs) {
1842       int Idx = findCondCodeUseOperandIdxForBranchOrSelect(*CCUseInstr);
1843       assert(Idx >= 0 && "Unexpected instruction using CC.");
1844       MachineOperand &CCOperand = CCUseInstr->getOperand(Idx);
1845       AArch64CC::CondCode CCUse = AArch64CC::getInvertedCondCode(
1846           static_cast<AArch64CC::CondCode>(CCOperand.getImm()));
1847       CCOperand.setImm(CCUse);
1848     }
1849   }
1850   return true;
1851 }
1852 
1853 bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1854   if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD &&
1855       MI.getOpcode() != AArch64::CATCHRET)
1856     return false;
1857 
1858   MachineBasicBlock &MBB = *MI.getParent();
1859   auto &Subtarget = MBB.getParent()->getSubtarget<AArch64Subtarget>();
1860   auto TRI = Subtarget.getRegisterInfo();
1861   DebugLoc DL = MI.getDebugLoc();
1862 
1863   if (MI.getOpcode() == AArch64::CATCHRET) {
1864     // Skip to the first instruction before the epilog.
1865     const TargetInstrInfo *TII =
1866       MBB.getParent()->getSubtarget().getInstrInfo();
1867     MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
1868     auto MBBI = MachineBasicBlock::iterator(MI);
1869     MachineBasicBlock::iterator FirstEpilogSEH = std::prev(MBBI);
1870     while (FirstEpilogSEH->getFlag(MachineInstr::FrameDestroy) &&
1871            FirstEpilogSEH != MBB.begin())
1872       FirstEpilogSEH = std::prev(FirstEpilogSEH);
1873     if (FirstEpilogSEH != MBB.begin())
1874       FirstEpilogSEH = std::next(FirstEpilogSEH);
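         // Materialize the address of TargetMBB as page + page-offset; roughly
         // (a sketch of the eventual assembly):
         //   adrp x0, <target>
         //   add  x0, x0, :lo12:<target>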
1875     BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADRP))
1876         .addReg(AArch64::X0, RegState::Define)
1877         .addMBB(TargetMBB);
1878     BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADDXri))
1879         .addReg(AArch64::X0, RegState::Define)
1880         .addReg(AArch64::X0)
1881         .addMBB(TargetMBB)
1882         .addImm(0);
1883     return true;
1884   }
1885 
1886   Register Reg = MI.getOperand(0).getReg();
1887   Module &M = *MBB.getParent()->getFunction().getParent();
1888   if (M.getStackProtectorGuard() == "sysreg") {
1889     const AArch64SysReg::SysReg *SrcReg =
1890         AArch64SysReg::lookupSysRegByName(M.getStackProtectorGuardReg());
1891     if (!SrcReg)
1892       report_fatal_error("Unknown SysReg for Stack Protector Guard Register");
1893 
1894     // mrs xN, sysreg
1895     BuildMI(MBB, MI, DL, get(AArch64::MRS))
1896         .addDef(Reg, RegState::Renamable)
1897         .addImm(SrcReg->Encoding);
1898     int Offset = M.getStackProtectorGuardOffset();
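         // The guard offset is folded into the load using whichever addressing
         // form can encode it; for example (a sketch, with xN as the scratch
         // register holding the MRS result):
         //   offset 40:   ldr  xN, [xN, #40]    ; scaled LDRXui, imm = 40 / 8
         //   offset 17:   ldur xN, [xN, #17]    ; unscaled LDURXi
         //   offset 4095: add xN, xN, #4095 ; ldr xN, [xN]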
1899     if (Offset >= 0 && Offset <= 32760 && Offset % 8 == 0) {
1900       // ldr xN, [xN, #offset]
1901       BuildMI(MBB, MI, DL, get(AArch64::LDRXui))
1902           .addDef(Reg)
1903           .addUse(Reg, RegState::Kill)
1904           .addImm(Offset / 8);
1905     } else if (Offset >= -256 && Offset <= 255) {
1906       // ldur xN, [xN, #offset]
1907       BuildMI(MBB, MI, DL, get(AArch64::LDURXi))
1908           .addDef(Reg)
1909           .addUse(Reg, RegState::Kill)
1910           .addImm(Offset);
1911     } else if (Offset >= -4095 && Offset <= 4095) {
1912       if (Offset > 0) {
1913         // add xN, xN, #offset
1914         BuildMI(MBB, MI, DL, get(AArch64::ADDXri))
1915             .addDef(Reg)
1916             .addUse(Reg, RegState::Kill)
1917             .addImm(Offset)
1918             .addImm(0);
1919       } else {
1920         // sub xN, xN, #offset
1921         BuildMI(MBB, MI, DL, get(AArch64::SUBXri))
1922             .addDef(Reg)
1923             .addUse(Reg, RegState::Kill)
1924             .addImm(-Offset)
1925             .addImm(0);
1926       }
1927       // ldr xN, [xN]
1928       BuildMI(MBB, MI, DL, get(AArch64::LDRXui))
1929           .addDef(Reg)
1930           .addUse(Reg, RegState::Kill)
1931           .addImm(0);
1932     } else {
1933       // Cases that are larger than +/- 4095 and not a multiple of 8, or larger
1934       // than 32760.
1935       // It might be nice to use AArch64::MOVi32imm here, which would get
1936       // expanded in PreSched2 after PostRA, but our lone scratch Reg already
1937       // contains the MRS result. findScratchNonCalleeSaveRegister() in
1938       // AArch64FrameLowering might help us find such a scratch register
1939       // though. If we failed to find a scratch register, we could emit a
1940       // stream of add instructions to build up the immediate. Or, we could try
1941       // to insert an AArch64::MOVi32imm before register allocation so that we
1942       // didn't need to scavenge for a scratch register.
1943       report_fatal_error("Unable to encode Stack Protector Guard Offset");
1944     }
1945     MBB.erase(MI);
1946     return true;
1947   }
1948 
1949   const GlobalValue *GV =
1950       cast<GlobalValue>((*MI.memoperands_begin())->getValue());
1951   const TargetMachine &TM = MBB.getParent()->getTarget();
1952   unsigned OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
1953   const unsigned char MO_NC = AArch64II::MO_NC;
1954 
1955   if ((OpFlags & AArch64II::MO_GOT) != 0) {
1956     BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
1957         .addGlobalAddress(GV, 0, OpFlags);
1958     if (Subtarget.isTargetILP32()) {
1959       unsigned Reg32 = TRI->getSubReg(Reg, AArch64::sub_32);
1960       BuildMI(MBB, MI, DL, get(AArch64::LDRWui))
1961           .addDef(Reg32, RegState::Dead)
1962           .addUse(Reg, RegState::Kill)
1963           .addImm(0)
1964           .addMemOperand(*MI.memoperands_begin())
1965           .addDef(Reg, RegState::Implicit);
1966     } else {
1967       BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1968           .addReg(Reg, RegState::Kill)
1969           .addImm(0)
1970           .addMemOperand(*MI.memoperands_begin());
1971     }
1972   } else if (TM.getCodeModel() == CodeModel::Large) {
1973     assert(!Subtarget.isTargetILP32() && "how can large exist in ILP32?");
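         // Build the 64-bit address 16 bits at a time and load through it;
         // roughly (a sketch of the eventual assembly):
         //   movz x0, #:abs_g0_nc:var
         //   movk x0, #:abs_g1_nc:var, lsl #16
         //   movk x0, #:abs_g2_nc:var, lsl #32
         //   movk x0, #:abs_g3:var, lsl #48
         //   ldr  x0, [x0]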
1974     BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
1975         .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC)
1976         .addImm(0);
1977     BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1978         .addReg(Reg, RegState::Kill)
1979         .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC)
1980         .addImm(16);
1981     BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1982         .addReg(Reg, RegState::Kill)
1983         .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC)
1984         .addImm(32);
1985     BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1986         .addReg(Reg, RegState::Kill)
1987         .addGlobalAddress(GV, 0, AArch64II::MO_G3)
1988         .addImm(48);
1989     BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1990         .addReg(Reg, RegState::Kill)
1991         .addImm(0)
1992         .addMemOperand(*MI.memoperands_begin());
1993   } else if (TM.getCodeModel() == CodeModel::Tiny) {
1994     BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg)
1995         .addGlobalAddress(GV, 0, OpFlags);
1996   } else {
1997     BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
1998         .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
1999     unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
2000     if (Subtarget.isTargetILP32()) {
2001       unsigned Reg32 = TRI->getSubReg(Reg, AArch64::sub_32);
2002       BuildMI(MBB, MI, DL, get(AArch64::LDRWui))
2003           .addDef(Reg32, RegState::Dead)
2004           .addUse(Reg, RegState::Kill)
2005           .addGlobalAddress(GV, 0, LoFlags)
2006           .addMemOperand(*MI.memoperands_begin())
2007           .addDef(Reg, RegState::Implicit);
2008     } else {
2009       BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
2010           .addReg(Reg, RegState::Kill)
2011           .addGlobalAddress(GV, 0, LoFlags)
2012           .addMemOperand(*MI.memoperands_begin());
2013     }
2014   }
2015 
2016   MBB.erase(MI);
2017 
2018   return true;
2019 }
2020 
2021 // Return true if this instruction simply sets its single destination register
2022 // to zero. This is equivalent to a register rename of the zero-register.
2023 bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) {
2024   switch (MI.getOpcode()) {
2025   default:
2026     break;
2027   case AArch64::MOVZWi:
2028   case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
2029     if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
2030       assert(MI.getDesc().getNumOperands() == 3 &&
2031              MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
2032       return true;
2033     }
2034     break;
2035   case AArch64::ANDWri: // and Rd, Rzr, #imm
2036     return MI.getOperand(1).getReg() == AArch64::WZR;
2037   case AArch64::ANDXri:
2038     return MI.getOperand(1).getReg() == AArch64::XZR;
2039   case TargetOpcode::COPY:
2040     return MI.getOperand(1).getReg() == AArch64::WZR;
2041   }
2042   return false;
2043 }
2044 
2045 // Return true if this instruction simply renames a general register without
2046 // modifying bits.
2047 bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) {
2048   switch (MI.getOpcode()) {
2049   default:
2050     break;
2051   case TargetOpcode::COPY: {
2052     // GPR32 copies will be lowered to ORRXrs
2053     Register DstReg = MI.getOperand(0).getReg();
2054     return (AArch64::GPR32RegClass.contains(DstReg) ||
2055             AArch64::GPR64RegClass.contains(DstReg));
2056   }
2057   case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
2058     if (MI.getOperand(1).getReg() == AArch64::XZR) {
2059       assert(MI.getDesc().getNumOperands() == 4 &&
2060              MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
2061       return true;
2062     }
2063     break;
2064   case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
2065     if (MI.getOperand(2).getImm() == 0) {
2066       assert(MI.getDesc().getNumOperands() == 4 &&
2067              MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
2068       return true;
2069     }
2070     break;
2071   }
2072   return false;
2073 }
2074 
2075 // Return true if this instruction simply renames a general register without
2076 // modifying bits.
2077 bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) {
2078   switch (MI.getOpcode()) {
2079   default:
2080     break;
2081   case TargetOpcode::COPY: {
2082     Register DstReg = MI.getOperand(0).getReg();
2083     return AArch64::FPR128RegClass.contains(DstReg);
2084   }
2085   case AArch64::ORRv16i8:
2086     if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
2087       assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
2088              "invalid ORRv16i8 operands");
2089       return true;
2090     }
2091     break;
2092   }
2093   return false;
2094 }
2095 
2096 unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
2097                                                int &FrameIndex) const {
2098   switch (MI.getOpcode()) {
2099   default:
2100     break;
2101   case AArch64::LDRWui:
2102   case AArch64::LDRXui:
2103   case AArch64::LDRBui:
2104   case AArch64::LDRHui:
2105   case AArch64::LDRSui:
2106   case AArch64::LDRDui:
2107   case AArch64::LDRQui:
2108     if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
2109         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
2110       FrameIndex = MI.getOperand(1).getIndex();
2111       return MI.getOperand(0).getReg();
2112     }
2113     break;
2114   }
2115 
2116   return 0;
2117 }
2118 
2119 unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
2120                                               int &FrameIndex) const {
2121   switch (MI.getOpcode()) {
2122   default:
2123     break;
2124   case AArch64::STRWui:
2125   case AArch64::STRXui:
2126   case AArch64::STRBui:
2127   case AArch64::STRHui:
2128   case AArch64::STRSui:
2129   case AArch64::STRDui:
2130   case AArch64::STRQui:
2131   case AArch64::LDR_PXI:
2132   case AArch64::STR_PXI:
2133     if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
2134         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
2135       FrameIndex = MI.getOperand(1).getIndex();
2136       return MI.getOperand(0).getReg();
2137     }
2138     break;
2139   }
2140   return 0;
2141 }
2142 
2143 /// Check all MachineMemOperands for a hint to suppress pairing.
2144 bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) {
2145   return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
2146     return MMO->getFlags() & MOSuppressPair;
2147   });
2148 }
2149 
2150 /// Set a flag on the first MachineMemOperand to suppress pairing.
2151 void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) {
2152   if (MI.memoperands_empty())
2153     return;
2154   (*MI.memoperands_begin())->setFlags(MOSuppressPair);
2155 }
2156 
2157 /// Check all MachineMemOperands for a hint that the load/store is strided.
2158 bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) {
2159   return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
2160     return MMO->getFlags() & MOStridedAccess;
2161   });
2162 }
2163 
2164 bool AArch64InstrInfo::hasUnscaledLdStOffset(unsigned Opc) {
2165   switch (Opc) {
2166   default:
2167     return false;
2168   case AArch64::STURSi:
2169   case AArch64::STRSpre:
2170   case AArch64::STURDi:
2171   case AArch64::STRDpre:
2172   case AArch64::STURQi:
2173   case AArch64::STRQpre:
2174   case AArch64::STURBBi:
2175   case AArch64::STURHHi:
2176   case AArch64::STURWi:
2177   case AArch64::STRWpre:
2178   case AArch64::STURXi:
2179   case AArch64::STRXpre:
2180   case AArch64::LDURSi:
2181   case AArch64::LDRSpre:
2182   case AArch64::LDURDi:
2183   case AArch64::LDRDpre:
2184   case AArch64::LDURQi:
2185   case AArch64::LDRQpre:
2186   case AArch64::LDURWi:
2187   case AArch64::LDRWpre:
2188   case AArch64::LDURXi:
2189   case AArch64::LDRXpre:
2190   case AArch64::LDURSWi:
2191   case AArch64::LDURHHi:
2192   case AArch64::LDURBBi:
2193   case AArch64::LDURSBWi:
2194   case AArch64::LDURSHWi:
2195     return true;
2196   }
2197 }
2198 
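     // Map a scaled ("unsigned immediate") load/store opcode to its unscaled
     // counterpart, e.g. LDRXui -> LDURXi.  A sketch of the encoding
     // difference:
     //   ldr  x0, [x1, #8]   ; LDRXui: imm is the byte offset divided by 8
     //   ldur x0, [x1, #1]   ; LDURXi: imm is a signed byte offset in [-256, 255]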
2199 Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
2200   switch (Opc) {
2201   default: return {};
2202   case AArch64::PRFMui: return AArch64::PRFUMi;
2203   case AArch64::LDRXui: return AArch64::LDURXi;
2204   case AArch64::LDRWui: return AArch64::LDURWi;
2205   case AArch64::LDRBui: return AArch64::LDURBi;
2206   case AArch64::LDRHui: return AArch64::LDURHi;
2207   case AArch64::LDRSui: return AArch64::LDURSi;
2208   case AArch64::LDRDui: return AArch64::LDURDi;
2209   case AArch64::LDRQui: return AArch64::LDURQi;
2210   case AArch64::LDRBBui: return AArch64::LDURBBi;
2211   case AArch64::LDRHHui: return AArch64::LDURHHi;
2212   case AArch64::LDRSBXui: return AArch64::LDURSBXi;
2213   case AArch64::LDRSBWui: return AArch64::LDURSBWi;
2214   case AArch64::LDRSHXui: return AArch64::LDURSHXi;
2215   case AArch64::LDRSHWui: return AArch64::LDURSHWi;
2216   case AArch64::LDRSWui: return AArch64::LDURSWi;
2217   case AArch64::STRXui: return AArch64::STURXi;
2218   case AArch64::STRWui: return AArch64::STURWi;
2219   case AArch64::STRBui: return AArch64::STURBi;
2220   case AArch64::STRHui: return AArch64::STURHi;
2221   case AArch64::STRSui: return AArch64::STURSi;
2222   case AArch64::STRDui: return AArch64::STURDi;
2223   case AArch64::STRQui: return AArch64::STURQi;
2224   case AArch64::STRBBui: return AArch64::STURBBi;
2225   case AArch64::STRHHui: return AArch64::STURHHi;
2226   }
2227 }
2228 
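     // The returned value is the operand index of the immediate offset, e.g.
     // (illustrative operand layouts):
     //   LDRXui   dst, base, imm         -> index 2
     //   LDPXi    dst1, dst2, base, imm  -> index 3
     //   LD1B_IMM dst, pred, base, imm   -> index 3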
2229 unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
2230   switch (Opc) {
2231   default:
2232     return 2;
2233   case AArch64::LDPXi:
2234   case AArch64::LDPDi:
2235   case AArch64::STPXi:
2236   case AArch64::STPDi:
2237   case AArch64::LDNPXi:
2238   case AArch64::LDNPDi:
2239   case AArch64::STNPXi:
2240   case AArch64::STNPDi:
2241   case AArch64::LDPQi:
2242   case AArch64::STPQi:
2243   case AArch64::LDNPQi:
2244   case AArch64::STNPQi:
2245   case AArch64::LDPWi:
2246   case AArch64::LDPSi:
2247   case AArch64::STPWi:
2248   case AArch64::STPSi:
2249   case AArch64::LDNPWi:
2250   case AArch64::LDNPSi:
2251   case AArch64::STNPWi:
2252   case AArch64::STNPSi:
2253   case AArch64::LDG:
2254   case AArch64::STGPi:
2255 
2256   case AArch64::LD1B_IMM:
2257   case AArch64::LD1B_H_IMM:
2258   case AArch64::LD1B_S_IMM:
2259   case AArch64::LD1B_D_IMM:
2260   case AArch64::LD1SB_H_IMM:
2261   case AArch64::LD1SB_S_IMM:
2262   case AArch64::LD1SB_D_IMM:
2263   case AArch64::LD1H_IMM:
2264   case AArch64::LD1H_S_IMM:
2265   case AArch64::LD1H_D_IMM:
2266   case AArch64::LD1SH_S_IMM:
2267   case AArch64::LD1SH_D_IMM:
2268   case AArch64::LD1W_IMM:
2269   case AArch64::LD1W_D_IMM:
2270   case AArch64::LD1SW_D_IMM:
2271   case AArch64::LD1D_IMM:
2272 
2273   case AArch64::LD2B_IMM:
2274   case AArch64::LD2H_IMM:
2275   case AArch64::LD2W_IMM:
2276   case AArch64::LD2D_IMM:
2277   case AArch64::LD3B_IMM:
2278   case AArch64::LD3H_IMM:
2279   case AArch64::LD3W_IMM:
2280   case AArch64::LD3D_IMM:
2281   case AArch64::LD4B_IMM:
2282   case AArch64::LD4H_IMM:
2283   case AArch64::LD4W_IMM:
2284   case AArch64::LD4D_IMM:
2285 
2286   case AArch64::ST1B_IMM:
2287   case AArch64::ST1B_H_IMM:
2288   case AArch64::ST1B_S_IMM:
2289   case AArch64::ST1B_D_IMM:
2290   case AArch64::ST1H_IMM:
2291   case AArch64::ST1H_S_IMM:
2292   case AArch64::ST1H_D_IMM:
2293   case AArch64::ST1W_IMM:
2294   case AArch64::ST1W_D_IMM:
2295   case AArch64::ST1D_IMM:
2296 
2297   case AArch64::ST2B_IMM:
2298   case AArch64::ST2H_IMM:
2299   case AArch64::ST2W_IMM:
2300   case AArch64::ST2D_IMM:
2301   case AArch64::ST3B_IMM:
2302   case AArch64::ST3H_IMM:
2303   case AArch64::ST3W_IMM:
2304   case AArch64::ST3D_IMM:
2305   case AArch64::ST4B_IMM:
2306   case AArch64::ST4H_IMM:
2307   case AArch64::ST4W_IMM:
2308   case AArch64::ST4D_IMM:
2309 
2310   case AArch64::LD1RB_IMM:
2311   case AArch64::LD1RB_H_IMM:
2312   case AArch64::LD1RB_S_IMM:
2313   case AArch64::LD1RB_D_IMM:
2314   case AArch64::LD1RSB_H_IMM:
2315   case AArch64::LD1RSB_S_IMM:
2316   case AArch64::LD1RSB_D_IMM:
2317   case AArch64::LD1RH_IMM:
2318   case AArch64::LD1RH_S_IMM:
2319   case AArch64::LD1RH_D_IMM:
2320   case AArch64::LD1RSH_S_IMM:
2321   case AArch64::LD1RSH_D_IMM:
2322   case AArch64::LD1RW_IMM:
2323   case AArch64::LD1RW_D_IMM:
2324   case AArch64::LD1RSW_IMM:
2325   case AArch64::LD1RD_IMM:
2326 
2327   case AArch64::LDNT1B_ZRI:
2328   case AArch64::LDNT1H_ZRI:
2329   case AArch64::LDNT1W_ZRI:
2330   case AArch64::LDNT1D_ZRI:
2331   case AArch64::STNT1B_ZRI:
2332   case AArch64::STNT1H_ZRI:
2333   case AArch64::STNT1W_ZRI:
2334   case AArch64::STNT1D_ZRI:
2335 
2336   case AArch64::LDNF1B_IMM:
2337   case AArch64::LDNF1B_H_IMM:
2338   case AArch64::LDNF1B_S_IMM:
2339   case AArch64::LDNF1B_D_IMM:
2340   case AArch64::LDNF1SB_H_IMM:
2341   case AArch64::LDNF1SB_S_IMM:
2342   case AArch64::LDNF1SB_D_IMM:
2343   case AArch64::LDNF1H_IMM:
2344   case AArch64::LDNF1H_S_IMM:
2345   case AArch64::LDNF1H_D_IMM:
2346   case AArch64::LDNF1SH_S_IMM:
2347   case AArch64::LDNF1SH_D_IMM:
2348   case AArch64::LDNF1W_IMM:
2349   case AArch64::LDNF1W_D_IMM:
2350   case AArch64::LDNF1SW_D_IMM:
2351   case AArch64::LDNF1D_IMM:
2352     return 3;
2353   case AArch64::ADDG:
2354   case AArch64::STGOffset:
2355   case AArch64::LDR_PXI:
2356   case AArch64::STR_PXI:
2357     return 2;
2358   }
2359 }
2360 
2361 bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
2362   switch (MI.getOpcode()) {
2363   default:
2364     return false;
2365   // Scaled instructions.
2366   case AArch64::STRSui:
2367   case AArch64::STRDui:
2368   case AArch64::STRQui:
2369   case AArch64::STRXui:
2370   case AArch64::STRWui:
2371   case AArch64::LDRSui:
2372   case AArch64::LDRDui:
2373   case AArch64::LDRQui:
2374   case AArch64::LDRXui:
2375   case AArch64::LDRWui:
2376   case AArch64::LDRSWui:
2377   // Unscaled instructions.
2378   case AArch64::STURSi:
2379   case AArch64::STRSpre:
2380   case AArch64::STURDi:
2381   case AArch64::STRDpre:
2382   case AArch64::STURQi:
2383   case AArch64::STRQpre:
2384   case AArch64::STURWi:
2385   case AArch64::STRWpre:
2386   case AArch64::STURXi:
2387   case AArch64::STRXpre:
2388   case AArch64::LDURSi:
2389   case AArch64::LDRSpre:
2390   case AArch64::LDURDi:
2391   case AArch64::LDRDpre:
2392   case AArch64::LDURQi:
2393   case AArch64::LDRQpre:
2394   case AArch64::LDURWi:
2395   case AArch64::LDRWpre:
2396   case AArch64::LDURXi:
2397   case AArch64::LDRXpre:
2398   case AArch64::LDURSWi:
2399     return true;
2400   }
2401 }
2402 
2403 unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc,
2404                                                    bool &Is64Bit) {
2405   switch (Opc) {
2406   default:
2407     llvm_unreachable("Opcode has no flag setting equivalent!");
2408   // 32-bit cases:
2409   case AArch64::ADDWri:
2410     Is64Bit = false;
2411     return AArch64::ADDSWri;
2412   case AArch64::ADDWrr:
2413     Is64Bit = false;
2414     return AArch64::ADDSWrr;
2415   case AArch64::ADDWrs:
2416     Is64Bit = false;
2417     return AArch64::ADDSWrs;
2418   case AArch64::ADDWrx:
2419     Is64Bit = false;
2420     return AArch64::ADDSWrx;
2421   case AArch64::ANDWri:
2422     Is64Bit = false;
2423     return AArch64::ANDSWri;
2424   case AArch64::ANDWrr:
2425     Is64Bit = false;
2426     return AArch64::ANDSWrr;
2427   case AArch64::ANDWrs:
2428     Is64Bit = false;
2429     return AArch64::ANDSWrs;
2430   case AArch64::BICWrr:
2431     Is64Bit = false;
2432     return AArch64::BICSWrr;
2433   case AArch64::BICWrs:
2434     Is64Bit = false;
2435     return AArch64::BICSWrs;
2436   case AArch64::SUBWri:
2437     Is64Bit = false;
2438     return AArch64::SUBSWri;
2439   case AArch64::SUBWrr:
2440     Is64Bit = false;
2441     return AArch64::SUBSWrr;
2442   case AArch64::SUBWrs:
2443     Is64Bit = false;
2444     return AArch64::SUBSWrs;
2445   case AArch64::SUBWrx:
2446     Is64Bit = false;
2447     return AArch64::SUBSWrx;
2448   // 64-bit cases:
2449   case AArch64::ADDXri:
2450     Is64Bit = true;
2451     return AArch64::ADDSXri;
2452   case AArch64::ADDXrr:
2453     Is64Bit = true;
2454     return AArch64::ADDSXrr;
2455   case AArch64::ADDXrs:
2456     Is64Bit = true;
2457     return AArch64::ADDSXrs;
2458   case AArch64::ADDXrx:
2459     Is64Bit = true;
2460     return AArch64::ADDSXrx;
2461   case AArch64::ANDXri:
2462     Is64Bit = true;
2463     return AArch64::ANDSXri;
2464   case AArch64::ANDXrr:
2465     Is64Bit = true;
2466     return AArch64::ANDSXrr;
2467   case AArch64::ANDXrs:
2468     Is64Bit = true;
2469     return AArch64::ANDSXrs;
2470   case AArch64::BICXrr:
2471     Is64Bit = true;
2472     return AArch64::BICSXrr;
2473   case AArch64::BICXrs:
2474     Is64Bit = true;
2475     return AArch64::BICSXrs;
2476   case AArch64::SUBXri:
2477     Is64Bit = true;
2478     return AArch64::SUBSXri;
2479   case AArch64::SUBXrr:
2480     Is64Bit = true;
2481     return AArch64::SUBSXrr;
2482   case AArch64::SUBXrs:
2483     Is64Bit = true;
2484     return AArch64::SUBSXrs;
2485   case AArch64::SUBXrx:
2486     Is64Bit = true;
2487     return AArch64::SUBSXrx;
2488   }
2489 }
2490 
2491 // Is this a candidate for ld/st merging or pairing?  For example, we don't
2492 // touch volatiles or load/stores that have a hint to avoid pair formation.
2493 bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
2494 
2495   bool IsPreLdSt = isPreLdSt(MI);
2496 
2497   // If this is a volatile load/store, don't mess with it.
2498   if (MI.hasOrderedMemoryRef())
2499     return false;
2500 
2501   // Make sure this is a reg/fi+imm (as opposed to an address reloc).
2502   // For Pre-inc LD/ST, the operand is shifted by one.
2503   assert((MI.getOperand(IsPreLdSt ? 2 : 1).isReg() ||
2504           MI.getOperand(IsPreLdSt ? 2 : 1).isFI()) &&
2505          "Expected a reg or frame index operand.");
2506 
2507   // For pre-indexed addressing instructions, operand 3 (rather than
2508   // operand 2) holds the immediate offset.
2509   bool IsImmPreLdSt = IsPreLdSt && MI.getOperand(3).isImm();
2510 
2511   if (!MI.getOperand(2).isImm() && !IsImmPreLdSt)
2512     return false;
2513 
2514   // Can't merge/pair if the instruction modifies the base register.
2515   // e.g., ldr x0, [x0]
2516   // This case will never occur with an FI base.
2517   // However, if the instruction is an LDR/STR<S,D,Q,W,X>pre, it can be merged.
2518   // For example:
2519   //   ldr q0, [x11, #32]!
2520   //   ldr q1, [x11, #16]
2521   //   to
2522   //   ldp q0, q1, [x11, #32]!
2523   if (MI.getOperand(1).isReg() && !IsPreLdSt) {
2524     Register BaseReg = MI.getOperand(1).getReg();
2525     const TargetRegisterInfo *TRI = &getRegisterInfo();
2526     if (MI.modifiesRegister(BaseReg, TRI))
2527       return false;
2528   }
2529 
2530   // Check if this load/store has a hint to avoid pair formation.
2531   // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
2532   if (isLdStPairSuppressed(MI))
2533     return false;
2534 
2535   // Do not pair any callee-save store/reload instructions in the
2536   // prologue/epilogue if the CFI information encoded the operations as separate
2537   // instructions, as that would cause the size of the actual prologue to
2538   // differ from the prologue size recorded in the Windows CFI.
2539   const MCAsmInfo *MAI = MI.getMF()->getTarget().getMCAsmInfo();
2540   bool NeedsWinCFI = MAI->usesWindowsCFI() &&
2541                      MI.getMF()->getFunction().needsUnwindTableEntry();
2542   if (NeedsWinCFI && (MI.getFlag(MachineInstr::FrameSetup) ||
2543                       MI.getFlag(MachineInstr::FrameDestroy)))
2544     return false;
2545 
2546   // On some CPUs quad load/store pairs are slower than two single load/stores.
2547   if (Subtarget.isPaired128Slow()) {
2548     switch (MI.getOpcode()) {
2549     default:
2550       break;
2551     case AArch64::LDURQi:
2552     case AArch64::STURQi:
2553     case AArch64::LDRQui:
2554     case AArch64::STRQui:
2555       return false;
2556     }
2557   }
2558 
2559   return true;
2560 }
2561 
2562 bool AArch64InstrInfo::getMemOperandsWithOffsetWidth(
2563     const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
2564     int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
2565     const TargetRegisterInfo *TRI) const {
2566   if (!LdSt.mayLoadOrStore())
2567     return false;
2568 
2569   const MachineOperand *BaseOp;
2570   if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, OffsetIsScalable,
2571                                     Width, TRI))
2572     return false;
2573   BaseOps.push_back(BaseOp);
2574   return true;
2575 }
2576 
2577 Optional<ExtAddrMode>
2578 AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
2579                                           const TargetRegisterInfo *TRI) const {
2580   const MachineOperand *Base; // Filled with the base operand of MI.
2581   int64_t Offset;             // Filled with the offset of MI.
2582   bool OffsetIsScalable;
2583   if (!getMemOperandWithOffset(MemI, Base, Offset, OffsetIsScalable, TRI))
2584     return None;
2585 
2586   if (!Base->isReg())
2587     return None;
2588   ExtAddrMode AM;
2589   AM.BaseReg = Base->getReg();
2590   AM.Displacement = Offset;
2591   AM.ScaledReg = 0;
2592   AM.Scale = 0;
2593   return AM;
2594 }
2595 
2596 bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
2597     const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
2598     bool &OffsetIsScalable, unsigned &Width,
2599     const TargetRegisterInfo *TRI) const {
2600   assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
2601   // Handle only loads/stores with base register followed by immediate offset.
2602   if (LdSt.getNumExplicitOperands() == 3) {
2603     // Non-paired instruction (e.g., ldr x1, [x0, #8]).
2604     if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
2605         !LdSt.getOperand(2).isImm())
2606       return false;
2607   } else if (LdSt.getNumExplicitOperands() == 4) {
2608     // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
2609     if (!LdSt.getOperand(1).isReg() ||
2610         (!LdSt.getOperand(2).isReg() && !LdSt.getOperand(2).isFI()) ||
2611         !LdSt.getOperand(3).isImm())
2612       return false;
2613   } else
2614     return false;
2615 
2616   // Get the scaling factor for the instruction and set the width of the
2617   // memory access.
2618   TypeSize Scale(0U, false);
2619   int64_t Dummy1, Dummy2;
2620 
2621   // If this returns false, then it's an instruction we don't want to handle.
2622   if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2))
2623     return false;
2624 
2625   // Compute the offset. Offset is calculated as the immediate operand
2626   // multiplied by the scaling factor. Unscaled instructions have scaling factor
2627   // set to 1.
2628   if (LdSt.getNumExplicitOperands() == 3) {
2629     BaseOp = &LdSt.getOperand(1);
2630     Offset = LdSt.getOperand(2).getImm() * Scale.getKnownMinSize();
2631   } else {
2632     assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
2633     BaseOp = &LdSt.getOperand(2);
2634     Offset = LdSt.getOperand(3).getImm() * Scale.getKnownMinSize();
2635   }
2636   OffsetIsScalable = Scale.isScalable();
2637 
2638   if (!BaseOp->isReg() && !BaseOp->isFI())
2639     return false;
2640 
2641   return true;
2642 }
2643 
2644 MachineOperand &
2645 AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const {
2646   assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
2647   MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1);
2648   assert(OfsOp.isImm() && "Offset operand wasn't immediate.");
2649   return OfsOp;
2650 }
2651 
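     // For example (per the cases below): for LDRXui the immediate is scaled
     // by 8, so the encodable byte offsets are 0, 8, ..., 8 * 4095 = 32760;
     // for LDURXi the immediate is an unscaled byte offset in [-256, 255].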
2652 bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
2653                                     unsigned &Width, int64_t &MinOffset,
2654                                     int64_t &MaxOffset) {
2655   const unsigned SVEMaxBytesPerVector = AArch64::SVEMaxBitsPerVector / 8;
2656   switch (Opcode) {
2657   // Not a memory operation, or not one we want to handle.
2658   default:
2659     Scale = TypeSize::Fixed(0);
2660     Width = 0;
2661     MinOffset = MaxOffset = 0;
2662     return false;
2663   case AArch64::STRWpost:
2664   case AArch64::LDRWpost:
2665     Width = 32;
2666     Scale = TypeSize::Fixed(4);
2667     MinOffset = -256;
2668     MaxOffset = 255;
2669     break;
2670   case AArch64::LDURQi:
2671   case AArch64::STURQi:
2672     Width = 16;
2673     Scale = TypeSize::Fixed(1);
2674     MinOffset = -256;
2675     MaxOffset = 255;
2676     break;
2677   case AArch64::PRFUMi:
2678   case AArch64::LDURXi:
2679   case AArch64::LDURDi:
2680   case AArch64::STURXi:
2681   case AArch64::STURDi:
2682     Width = 8;
2683     Scale = TypeSize::Fixed(1);
2684     MinOffset = -256;
2685     MaxOffset = 255;
2686     break;
2687   case AArch64::LDURWi:
2688   case AArch64::LDURSi:
2689   case AArch64::LDURSWi:
2690   case AArch64::STURWi:
2691   case AArch64::STURSi:
2692     Width = 4;
2693     Scale = TypeSize::Fixed(1);
2694     MinOffset = -256;
2695     MaxOffset = 255;
2696     break;
2697   case AArch64::LDURHi:
2698   case AArch64::LDURHHi:
2699   case AArch64::LDURSHXi:
2700   case AArch64::LDURSHWi:
2701   case AArch64::STURHi:
2702   case AArch64::STURHHi:
2703     Width = 2;
2704     Scale = TypeSize::Fixed(1);
2705     MinOffset = -256;
2706     MaxOffset = 255;
2707     break;
2708   case AArch64::LDURBi:
2709   case AArch64::LDURBBi:
2710   case AArch64::LDURSBXi:
2711   case AArch64::LDURSBWi:
2712   case AArch64::STURBi:
2713   case AArch64::STURBBi:
2714     Width = 1;
2715     Scale = TypeSize::Fixed(1);
2716     MinOffset = -256;
2717     MaxOffset = 255;
2718     break;
2719   case AArch64::LDPQi:
2720   case AArch64::LDNPQi:
2721   case AArch64::STPQi:
2722   case AArch64::STNPQi:
2723     Scale = TypeSize::Fixed(16);
2724     Width = 32;
2725     MinOffset = -64;
2726     MaxOffset = 63;
2727     break;
2728   case AArch64::LDRQui:
2729   case AArch64::STRQui:
2730     Scale = TypeSize::Fixed(16);
2731     Width = 16;
2732     MinOffset = 0;
2733     MaxOffset = 4095;
2734     break;
2735   case AArch64::LDPXi:
2736   case AArch64::LDPDi:
2737   case AArch64::LDNPXi:
2738   case AArch64::LDNPDi:
2739   case AArch64::STPXi:
2740   case AArch64::STPDi:
2741   case AArch64::STNPXi:
2742   case AArch64::STNPDi:
2743     Scale = TypeSize::Fixed(8);
2744     Width = 16;
2745     MinOffset = -64;
2746     MaxOffset = 63;
2747     break;
2748   case AArch64::PRFMui:
2749   case AArch64::LDRXui:
2750   case AArch64::LDRDui:
2751   case AArch64::STRXui:
2752   case AArch64::STRDui:
2753     Scale = TypeSize::Fixed(8);
2754     Width = 8;
2755     MinOffset = 0;
2756     MaxOffset = 4095;
2757     break;
2758   case AArch64::StoreSwiftAsyncContext:
2759     // Store is an STRXui, but there might be an ADDXri in the expansion too.
2760     Scale = TypeSize::Fixed(1);
2761     Width = 8;
2762     MinOffset = 0;
2763     MaxOffset = 4095;
2764     break;
2765   case AArch64::LDPWi:
2766   case AArch64::LDPSi:
2767   case AArch64::LDNPWi:
2768   case AArch64::LDNPSi:
2769   case AArch64::STPWi:
2770   case AArch64::STPSi:
2771   case AArch64::STNPWi:
2772   case AArch64::STNPSi:
2773     Scale = TypeSize::Fixed(4);
2774     Width = 8;
2775     MinOffset = -64;
2776     MaxOffset = 63;
2777     break;
2778   case AArch64::LDRWui:
2779   case AArch64::LDRSui:
2780   case AArch64::LDRSWui:
2781   case AArch64::STRWui:
2782   case AArch64::STRSui:
2783     Scale = TypeSize::Fixed(4);
2784     Width = 4;
2785     MinOffset = 0;
2786     MaxOffset = 4095;
2787     break;
2788   case AArch64::LDRHui:
2789   case AArch64::LDRHHui:
2790   case AArch64::LDRSHWui:
2791   case AArch64::LDRSHXui:
2792   case AArch64::STRHui:
2793   case AArch64::STRHHui:
2794     Scale = TypeSize::Fixed(2);
2795     Width = 2;
2796     MinOffset = 0;
2797     MaxOffset = 4095;
2798     break;
2799   case AArch64::LDRBui:
2800   case AArch64::LDRBBui:
2801   case AArch64::LDRSBWui:
2802   case AArch64::LDRSBXui:
2803   case AArch64::STRBui:
2804   case AArch64::STRBBui:
2805     Scale = TypeSize::Fixed(1);
2806     Width = 1;
2807     MinOffset = 0;
2808     MaxOffset = 4095;
2809     break;
2810   case AArch64::STPXpre:
2811   case AArch64::LDPXpost:
2812   case AArch64::STPDpre:
2813   case AArch64::LDPDpost:
2814     Scale = TypeSize::Fixed(8);
2815     Width = 8;
2816     MinOffset = -512;
2817     MaxOffset = 504;
2818     break;
2819   case AArch64::STPQpre:
2820   case AArch64::LDPQpost:
2821     Scale = TypeSize::Fixed(16);
2822     Width = 16;
2823     MinOffset = -1024;
2824     MaxOffset = 1008;
2825     break;
2826   case AArch64::STRXpre:
2827   case AArch64::STRDpre:
2828   case AArch64::LDRXpost:
2829   case AArch64::LDRDpost:
2830     Scale = TypeSize::Fixed(1);
2831     Width = 8;
2832     MinOffset = -256;
2833     MaxOffset = 255;
2834     break;
2835   case AArch64::STRQpre:
2836   case AArch64::LDRQpost:
2837     Scale = TypeSize::Fixed(1);
2838     Width = 16;
2839     MinOffset = -256;
2840     MaxOffset = 255;
2841     break;
2842   case AArch64::ADDG:
2843     Scale = TypeSize::Fixed(16);
2844     Width = 0;
2845     MinOffset = 0;
2846     MaxOffset = 63;
2847     break;
2848   case AArch64::TAGPstack:
2849     Scale = TypeSize::Fixed(16);
2850     Width = 0;
2851     // TAGP with a negative offset turns into SUBP, which has a maximum offset
2852     // of 63 (not 64!).
2853     MinOffset = -63;
2854     MaxOffset = 63;
2855     break;
2856   case AArch64::LDG:
2857   case AArch64::STGOffset:
2858   case AArch64::STZGOffset:
2859     Scale = TypeSize::Fixed(16);
2860     Width = 16;
2861     MinOffset = -256;
2862     MaxOffset = 255;
2863     break;
2864   case AArch64::STR_ZZZZXI:
2865   case AArch64::LDR_ZZZZXI:
2866     Scale = TypeSize::Scalable(16);
2867     Width = SVEMaxBytesPerVector * 4;
2868     MinOffset = -256;
2869     MaxOffset = 252;
2870     break;
2871   case AArch64::STR_ZZZXI:
2872   case AArch64::LDR_ZZZXI:
2873     Scale = TypeSize::Scalable(16);
2874     Width = SVEMaxBytesPerVector * 3;
2875     MinOffset = -256;
2876     MaxOffset = 253;
2877     break;
2878   case AArch64::STR_ZZXI:
2879   case AArch64::LDR_ZZXI:
2880     Scale = TypeSize::Scalable(16);
2881     Width = SVEMaxBytesPerVector * 2;
2882     MinOffset = -256;
2883     MaxOffset = 254;
2884     break;
2885   case AArch64::LDR_PXI:
2886   case AArch64::STR_PXI:
2887     Scale = TypeSize::Scalable(2);
2888     Width = SVEMaxBytesPerVector / 8;
2889     MinOffset = -256;
2890     MaxOffset = 255;
2891     break;
2892   case AArch64::LDR_ZXI:
2893   case AArch64::STR_ZXI:
2894     Scale = TypeSize::Scalable(16);
2895     Width = SVEMaxBytesPerVector;
2896     MinOffset = -256;
2897     MaxOffset = 255;
2898     break;
2899   case AArch64::LD1B_IMM:
2900   case AArch64::LD1H_IMM:
2901   case AArch64::LD1W_IMM:
2902   case AArch64::LD1D_IMM:
2903   case AArch64::LDNT1B_ZRI:
2904   case AArch64::LDNT1H_ZRI:
2905   case AArch64::LDNT1W_ZRI:
2906   case AArch64::LDNT1D_ZRI:
2907   case AArch64::ST1B_IMM:
2908   case AArch64::ST1H_IMM:
2909   case AArch64::ST1W_IMM:
2910   case AArch64::ST1D_IMM:
2911   case AArch64::STNT1B_ZRI:
2912   case AArch64::STNT1H_ZRI:
2913   case AArch64::STNT1W_ZRI:
2914   case AArch64::STNT1D_ZRI:
2915   case AArch64::LDNF1B_IMM:
2916   case AArch64::LDNF1H_IMM:
2917   case AArch64::LDNF1W_IMM:
2918   case AArch64::LDNF1D_IMM:
2919     // A full vector's worth of data
2920     // Width = mbytes * elements
2921     Scale = TypeSize::Scalable(16);
2922     Width = SVEMaxBytesPerVector;
2923     MinOffset = -8;
2924     MaxOffset = 7;
2925     break;
2926   case AArch64::LD2B_IMM:
2927   case AArch64::LD2H_IMM:
2928   case AArch64::LD2W_IMM:
2929   case AArch64::LD2D_IMM:
2930   case AArch64::ST2B_IMM:
2931   case AArch64::ST2H_IMM:
2932   case AArch64::ST2W_IMM:
2933   case AArch64::ST2D_IMM:
2934     Scale = TypeSize::Scalable(32);
2935     Width = SVEMaxBytesPerVector * 2;
2936     MinOffset = -8;
2937     MaxOffset = 7;
2938     break;
2939   case AArch64::LD3B_IMM:
2940   case AArch64::LD3H_IMM:
2941   case AArch64::LD3W_IMM:
2942   case AArch64::LD3D_IMM:
2943   case AArch64::ST3B_IMM:
2944   case AArch64::ST3H_IMM:
2945   case AArch64::ST3W_IMM:
2946   case AArch64::ST3D_IMM:
2947     Scale = TypeSize::Scalable(48);
2948     Width = SVEMaxBytesPerVector * 3;
2949     MinOffset = -8;
2950     MaxOffset = 7;
2951     break;
2952   case AArch64::LD4B_IMM:
2953   case AArch64::LD4H_IMM:
2954   case AArch64::LD4W_IMM:
2955   case AArch64::LD4D_IMM:
2956   case AArch64::ST4B_IMM:
2957   case AArch64::ST4H_IMM:
2958   case AArch64::ST4W_IMM:
2959   case AArch64::ST4D_IMM:
2960     Scale = TypeSize::Scalable(64);
2961     Width = SVEMaxBytesPerVector * 4;
2962     MinOffset = -8;
2963     MaxOffset = 7;
2964     break;
2965   case AArch64::LD1B_H_IMM:
2966   case AArch64::LD1SB_H_IMM:
2967   case AArch64::LD1H_S_IMM:
2968   case AArch64::LD1SH_S_IMM:
2969   case AArch64::LD1W_D_IMM:
2970   case AArch64::LD1SW_D_IMM:
2971   case AArch64::ST1B_H_IMM:
2972   case AArch64::ST1H_S_IMM:
2973   case AArch64::ST1W_D_IMM:
2974   case AArch64::LDNF1B_H_IMM:
2975   case AArch64::LDNF1SB_H_IMM:
2976   case AArch64::LDNF1H_S_IMM:
2977   case AArch64::LDNF1SH_S_IMM:
2978   case AArch64::LDNF1W_D_IMM:
2979   case AArch64::LDNF1SW_D_IMM:
2980     // A half vector's worth of data
2981     // Width = mbytes * elements
2982     Scale = TypeSize::Scalable(8);
2983     Width = SVEMaxBytesPerVector / 2;
2984     MinOffset = -8;
2985     MaxOffset = 7;
2986     break;
2987   case AArch64::LD1B_S_IMM:
2988   case AArch64::LD1SB_S_IMM:
2989   case AArch64::LD1H_D_IMM:
2990   case AArch64::LD1SH_D_IMM:
2991   case AArch64::ST1B_S_IMM:
2992   case AArch64::ST1H_D_IMM:
2993   case AArch64::LDNF1B_S_IMM:
2994   case AArch64::LDNF1SB_S_IMM:
2995   case AArch64::LDNF1H_D_IMM:
2996   case AArch64::LDNF1SH_D_IMM:
2997     // A quarter vector's worth of data
2998     // Width = mbytes * elements
2999     Scale = TypeSize::Scalable(4);
3000     Width = SVEMaxBytesPerVector / 4;
3001     MinOffset = -8;
3002     MaxOffset = 7;
3003     break;
3004   case AArch64::LD1B_D_IMM:
3005   case AArch64::LD1SB_D_IMM:
3006   case AArch64::ST1B_D_IMM:
3007   case AArch64::LDNF1B_D_IMM:
3008   case AArch64::LDNF1SB_D_IMM:
3009     // An eighth of a vector's worth of data
3010     // Width = mbytes * elements
3011     Scale = TypeSize::Scalable(2);
3012     Width = SVEMaxBytesPerVector / 8;
3013     MinOffset = -8;
3014     MaxOffset = 7;
3015     break;
3016   case AArch64::ST2GOffset:
3017   case AArch64::STZ2GOffset:
3018     Scale = TypeSize::Fixed(16);
3019     Width = 32;
3020     MinOffset = -256;
3021     MaxOffset = 255;
3022     break;
3023   case AArch64::STGPi:
3024     Scale = TypeSize::Fixed(16);
3025     Width = 16;
3026     MinOffset = -64;
3027     MaxOffset = 63;
3028     break;
3029   case AArch64::LD1RB_IMM:
3030   case AArch64::LD1RB_H_IMM:
3031   case AArch64::LD1RB_S_IMM:
3032   case AArch64::LD1RB_D_IMM:
3033   case AArch64::LD1RSB_H_IMM:
3034   case AArch64::LD1RSB_S_IMM:
3035   case AArch64::LD1RSB_D_IMM:
3036     Scale = TypeSize::Fixed(1);
3037     Width = 1;
3038     MinOffset = 0;
3039     MaxOffset = 63;
3040     break;
3041   case AArch64::LD1RH_IMM:
3042   case AArch64::LD1RH_S_IMM:
3043   case AArch64::LD1RH_D_IMM:
3044   case AArch64::LD1RSH_S_IMM:
3045   case AArch64::LD1RSH_D_IMM:
3046     Scale = TypeSize::Fixed(2);
3047     Width = 2;
3048     MinOffset = 0;
3049     MaxOffset = 63;
3050     break;
3051   case AArch64::LD1RW_IMM:
3052   case AArch64::LD1RW_D_IMM:
3053   case AArch64::LD1RSW_IMM:
3054     Scale = TypeSize::Fixed(4);
3055     Width = 4;
3056     MinOffset = 0;
3057     MaxOffset = 63;
3058     break;
3059   case AArch64::LD1RD_IMM:
3060     Scale = TypeSize::Fixed(8);
3061     Width = 8;
3062     MinOffset = 0;
3063     MaxOffset = 63;
3064     break;
3065   }
3066 
3067   return true;
3068 }
3069 
3070 // Memory access size in bytes; used to scale unscaled load/store offsets.
3071 int AArch64InstrInfo::getMemScale(unsigned Opc) {
3072   switch (Opc) {
3073   default:
3074     llvm_unreachable("Opcode has unknown scale!");
3075   case AArch64::LDRBBui:
3076   case AArch64::LDURBBi:
3077   case AArch64::LDRSBWui:
3078   case AArch64::LDURSBWi:
3079   case AArch64::STRBBui:
3080   case AArch64::STURBBi:
3081     return 1;
3082   case AArch64::LDRHHui:
3083   case AArch64::LDURHHi:
3084   case AArch64::LDRSHWui:
3085   case AArch64::LDURSHWi:
3086   case AArch64::STRHHui:
3087   case AArch64::STURHHi:
3088     return 2;
3089   case AArch64::LDRSui:
3090   case AArch64::LDURSi:
3091   case AArch64::LDRSpre:
3092   case AArch64::LDRSWui:
3093   case AArch64::LDURSWi:
3094   case AArch64::LDRWpre:
3095   case AArch64::LDRWui:
3096   case AArch64::LDURWi:
3097   case AArch64::STRSui:
3098   case AArch64::STURSi:
3099   case AArch64::STRSpre:
3100   case AArch64::STRWui:
3101   case AArch64::STURWi:
3102   case AArch64::STRWpre:
3103   case AArch64::LDPSi:
3104   case AArch64::LDPSWi:
3105   case AArch64::LDPWi:
3106   case AArch64::STPSi:
3107   case AArch64::STPWi:
3108     return 4;
3109   case AArch64::LDRDui:
3110   case AArch64::LDURDi:
3111   case AArch64::LDRDpre:
3112   case AArch64::LDRXui:
3113   case AArch64::LDURXi:
3114   case AArch64::LDRXpre:
3115   case AArch64::STRDui:
3116   case AArch64::STURDi:
3117   case AArch64::STRDpre:
3118   case AArch64::STRXui:
3119   case AArch64::STURXi:
3120   case AArch64::STRXpre:
3121   case AArch64::LDPDi:
3122   case AArch64::LDPXi:
3123   case AArch64::STPDi:
3124   case AArch64::STPXi:
3125     return 8;
3126   case AArch64::LDRQui:
3127   case AArch64::LDURQi:
3128   case AArch64::STRQui:
3129   case AArch64::STURQi:
3130   case AArch64::STRQpre:
3131   case AArch64::LDPQi:
3132   case AArch64::LDRQpre:
3133   case AArch64::STPQi:
3134   case AArch64::STGOffset:
3135   case AArch64::STZGOffset:
3136   case AArch64::ST2GOffset:
3137   case AArch64::STZ2GOffset:
3138   case AArch64::STGPi:
3139     return 16;
3140   }
3141 }
3142 
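     // Returns true for pre-indexed loads with writeback, e.g. "ldr x0, [x1, #16]!".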
3143 bool AArch64InstrInfo::isPreLd(const MachineInstr &MI) {
3144   switch (MI.getOpcode()) {
3145   default:
3146     return false;
3147   case AArch64::LDRWpre:
3148   case AArch64::LDRXpre:
3149   case AArch64::LDRSpre:
3150   case AArch64::LDRDpre:
3151   case AArch64::LDRQpre:
3152     return true;
3153   }
3154 }
3155 
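     // Returns true for pre-indexed stores with writeback, e.g. "str x0, [x1, #-16]!".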
3156 bool AArch64InstrInfo::isPreSt(const MachineInstr &MI) {
3157   switch (MI.getOpcode()) {
3158   default:
3159     return false;
3160   case AArch64::STRWpre:
3161   case AArch64::STRXpre:
3162   case AArch64::STRSpre:
3163   case AArch64::STRDpre:
3164   case AArch64::STRQpre:
3165     return true;
3166   }
3167 }
3168 
3169 bool AArch64InstrInfo::isPreLdSt(const MachineInstr &MI) {
3170   return isPreLd(MI) || isPreSt(MI);
3171 }
3172 
3173 // Scale the unscaled offsets.  Returns false if the unscaled offset can't be
3174 // scaled.
3175 static bool scaleOffset(unsigned Opc, int64_t &Offset) {
3176   int Scale = AArch64InstrInfo::getMemScale(Opc);
3177 
3178   // If the byte-offset isn't a multiple of the stride, we can't scale this
3179   // offset.
3180   if (Offset % Scale != 0)
3181     return false;
3182 
3183   // Convert the byte-offset used by unscaled into an "element" offset used
3184   // by the scaled pair load/store instructions.
3185   Offset /= Scale;
3186   return true;
3187 }
3188 
3189 static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
3190   if (FirstOpc == SecondOpc)
3191     return true;
3192   // We can also pair sign-ext and zero-ext instructions.
3193   switch (FirstOpc) {
3194   default:
3195     return false;
3196   case AArch64::LDRWui:
3197   case AArch64::LDURWi:
3198     return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
3199   case AArch64::LDRSWui:
3200   case AArch64::LDURSWi:
3201     return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
3202   }
3203   // These instructions can't be paired based on their opcodes.
3204   return false;
3205 }
3206 
3207 static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
3208                             int64_t Offset1, unsigned Opcode1, int FI2,
3209                             int64_t Offset2, unsigned Opcode2) {
3210   // Accesses through fixed stack object frame indices may refer to a different
3211   // fixed stack slot. Check that object offsets plus instruction offsets match.
3212   if (MFI.isFixedObjectIndex(FI1) && MFI.isFixedObjectIndex(FI2)) {
3213     int64_t ObjectOffset1 = MFI.getObjectOffset(FI1);
3214     int64_t ObjectOffset2 = MFI.getObjectOffset(FI2);
3215     assert(ObjectOffset1 <= ObjectOffset2 && "Object offsets are not ordered.");
3216     // Convert to scaled object offsets.
3217     int Scale1 = AArch64InstrInfo::getMemScale(Opcode1);
3218     if (ObjectOffset1 % Scale1 != 0)
3219       return false;
3220     ObjectOffset1 /= Scale1;
3221     int Scale2 = AArch64InstrInfo::getMemScale(Opcode2);
3222     if (ObjectOffset2 % Scale2 != 0)
3223       return false;
3224     ObjectOffset2 /= Scale2;
3225     ObjectOffset1 += Offset1;
3226     ObjectOffset2 += Offset2;
3227     return ObjectOffset1 + 1 == ObjectOffset2;
3228   }
3229 
3230   return FI1 == FI2;
3231 }
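
// Worked example for the fixed-object path above (offsets hypothetical): two
// LDRXui accesses (scale 8) through fixed objects at object offsets -16 and
// -8 scale to -2 and -1; with zero instruction offsets, -2 + 1 == -1, so the
// accesses are adjacent and may be clustered even though FI1 != FI2.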
3232 
3233 /// Detect opportunities for ldp/stp formation.
3234 ///
3235 /// Only called for LdSt instructions for which getMemOperandWithOffset returns true.
3236 bool AArch64InstrInfo::shouldClusterMemOps(
3237     ArrayRef<const MachineOperand *> BaseOps1,
3238     ArrayRef<const MachineOperand *> BaseOps2, unsigned NumLoads,
3239     unsigned NumBytes) const {
3240   assert(BaseOps1.size() == 1 && BaseOps2.size() == 1);
3241   const MachineOperand &BaseOp1 = *BaseOps1.front();
3242   const MachineOperand &BaseOp2 = *BaseOps2.front();
3243   const MachineInstr &FirstLdSt = *BaseOp1.getParent();
3244   const MachineInstr &SecondLdSt = *BaseOp2.getParent();
3245   if (BaseOp1.getType() != BaseOp2.getType())
3246     return false;
3247 
3248   assert((BaseOp1.isReg() || BaseOp1.isFI()) &&
3249          "Only base registers and frame indices are supported.");
3250 
3251   // Check for both base regs and base FI.
3252   if (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg())
3253     return false;
3254 
3255   // Only cluster up to a single pair.
3256   if (NumLoads > 2)
3257     return false;
3258 
3259   if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
3260     return false;
3261 
3262   // Can we pair these instructions based on their opcodes?
3263   unsigned FirstOpc = FirstLdSt.getOpcode();
3264   unsigned SecondOpc = SecondLdSt.getOpcode();
3265   if (!canPairLdStOpc(FirstOpc, SecondOpc))
3266     return false;
3267 
3268   // Can't merge volatile loads/stores, or loads/stores that carry a hint to
3269   // avoid pair formation.
3270   if (!isCandidateToMergeOrPair(FirstLdSt) ||
3271       !isCandidateToMergeOrPair(SecondLdSt))
3272     return false;
3273 
3274   // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
3275   int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
3276   if (hasUnscaledLdStOffset(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
3277     return false;
3278 
3279   int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
3280   if (hasUnscaledLdStOffset(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
3281     return false;
3282 
3283   // Pairwise instructions have a 7-bit signed offset field.
3284   if (Offset1 > 63 || Offset1 < -64)
3285     return false;
3286 
3287   // The caller should already have ordered FirstLdSt/SecondLdSt by offset.
3288   // Note: this does not hold when the bases are non-equal frame indices.
3289   if (BaseOp1.isFI()) {
3290     assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&
3291            "Caller should have ordered offsets.");
3292 
3293     const MachineFrameInfo &MFI =
3294         FirstLdSt.getParent()->getParent()->getFrameInfo();
3295     return shouldClusterFI(MFI, BaseOp1.getIndex(), Offset1, FirstOpc,
3296                            BaseOp2.getIndex(), Offset2, SecondOpc);
3297   }
3298 
3299   assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
3300 
3301   return Offset1 + 1 == Offset2;
3302 }
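
// Note on the 7-bit range check in shouldClusterMemOps: LDP/STP encode a
// signed 7-bit element offset, i.e. [-64, 63] elements, so e.g. STPXi
// (scale 8) covers byte offsets in [-512, 504]. That is why the scaled
// Offset1 must fit in [-64, 63].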
3303 
3304 static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
3305                                             unsigned Reg, unsigned SubIdx,
3306                                             unsigned State,
3307                                             const TargetRegisterInfo *TRI) {
3308   if (!SubIdx)
3309     return MIB.addReg(Reg, State);
3310 
3311   if (Register::isPhysicalRegister(Reg))
3312     return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
3313   return MIB.addReg(Reg, State, SubIdx);
3314 }
3315 
3316 static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
3317                                         unsigned NumRegs) {
3318   // We really want the positive remainder mod 32 here, which happens to be
3319   // easily obtainable with a mask.
3320   return ((DestReg - SrcReg) & 0x1f) < NumRegs;
3321 }
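
// Illustrative trace (encodings hypothetical): copying a two-register tuple
// from encoding 0 to encoding 1 gives ((1 - 0) & 0x1f) == 1 < 2, so a forward
// copy would clobber the second source register before it is read and the
// copy must be emitted backwards. With the destination at encoding 2,
// ((2 - 0) & 0x1f) == 2 is not < 2, and a forward copy is safe.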
3322 
3323 void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
3324                                         MachineBasicBlock::iterator I,
3325                                         const DebugLoc &DL, MCRegister DestReg,
3326                                         MCRegister SrcReg, bool KillSrc,
3327                                         unsigned Opcode,
3328                                         ArrayRef<unsigned> Indices) const {
3329   assert(Subtarget.hasNEON() && "Unexpected register copy without NEON");
3330   const TargetRegisterInfo *TRI = &getRegisterInfo();
3331   uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
3332   uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
3333   unsigned NumRegs = Indices.size();
3334 
3335   int SubReg = 0, End = NumRegs, Incr = 1;
3336   if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
3337     SubReg = NumRegs - 1;
3338     End = -1;
3339     Incr = -1;
3340   }
3341 
3342   for (; SubReg != End; SubReg += Incr) {
3343     const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
3344     AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
3345     AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
3346     AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
3347   }
3348 }
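
// Illustrative expansion (registers hypothetical): a Q-pair copy
// q1_q2 <- q0_q1 with Opcode == AArch64::ORRv16i8 overlaps forwards, so the
// loop above runs backwards and emits
//
//   orr v2.16b, v1.16b, v1.16b
//   orr v1.16b, v0.16b, v0.16b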
3349 
3350 void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
3351                                        MachineBasicBlock::iterator I,
3352                                        DebugLoc DL, unsigned DestReg,
3353                                        unsigned SrcReg, bool KillSrc,
3354                                        unsigned Opcode, unsigned ZeroReg,
3355                                        llvm::ArrayRef<unsigned> Indices) const {
3356   const TargetRegisterInfo *TRI = &getRegisterInfo();
3357   unsigned NumRegs = Indices.size();
3358 
3359 #ifndef NDEBUG
3360   uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
3361   uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
3362   assert(DestEncoding % NumRegs == 0 && SrcEncoding % NumRegs == 0 &&
3363          "GPR reg sequences should not be able to overlap");
3364 #endif
3365 
3366   for (unsigned SubReg = 0; SubReg != NumRegs; ++SubReg) {
3367     const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
3368     AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
3369     MIB.addReg(ZeroReg);
3370     AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
3371     MIB.addImm(0);
3372   }
3373 }
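
// Illustrative expansion (registers hypothetical): an XSeqPairs copy
// x0_x1 <- x2_x3 with Opcode == AArch64::ORRXrs and ZeroReg == XZR emits
//
//   orr x0, xzr, x2    // trailing addImm(0) encodes LSL #0
//   orr x1, xzr, x3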
3374 
3375 void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
3376                                    MachineBasicBlock::iterator I,
3377                                    const DebugLoc &DL, MCRegister DestReg,
3378                                    MCRegister SrcReg, bool KillSrc) const {
3379   if (AArch64::GPR32spRegClass.contains(DestReg) &&
3380       (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
3381     const TargetRegisterInfo *TRI = &getRegisterInfo();
3382 
3383     if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
3384       // If either operand is WSP, expand to ADD #0.
3385       if (Subtarget.hasZeroCycleRegMove()) {
3386         // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
3387         MCRegister DestRegX = TRI->getMatchingSuperReg(
3388             DestReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
3389         MCRegister SrcRegX = TRI->getMatchingSuperReg(
3390             SrcReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
3391         // This instruction is reading and writing X registers.  This may upset
3392         // the register scavenger and machine verifier, so we need to indicate
3393         // that we are reading an undefined value from SrcRegX, but a proper
3394         // value from SrcReg.
3395         BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
3396             .addReg(SrcRegX, RegState::Undef)
3397             .addImm(0)
3398             .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
3399             .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
3400       } else {
3401         BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
3402             .addReg(SrcReg, getKillRegState(KillSrc))
3403             .addImm(0)
3404             .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
3405       }
3406     } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroingGP()) {
3407       BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
3408           .addImm(0)
3409           .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
3410     } else {
3411       if (Subtarget.hasZeroCycleRegMove()) {
3412         // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
3413         MCRegister DestRegX = TRI->getMatchingSuperReg(
3414             DestReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
3415         MCRegister SrcRegX = TRI->getMatchingSuperReg(
3416             SrcReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
3417         // This instruction is reading and writing X registers.  This may upset
3418         // the register scavenger and machine verifier, so we need to indicate
3419         // that we are reading an undefined value from SrcRegX, but a proper
3420         // value from SrcReg.
3421         BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
3422             .addReg(AArch64::XZR)
3423             .addReg(SrcRegX, RegState::Undef)
3424             .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
3425       } else {
3426         // Otherwise, expand to ORR WZR.
3427         BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
3428             .addReg(AArch64::WZR)
3429             .addReg(SrcReg, getKillRegState(KillSrc));
3430       }
3431     }
3432     return;
3433   }
3434 
3435   // Copy a Predicate register by ORRing with itself.
3436   if (AArch64::PPRRegClass.contains(DestReg) &&
3437       AArch64::PPRRegClass.contains(SrcReg)) {
3438     assert(Subtarget.hasSVE() && "Unexpected SVE register.");
3439     BuildMI(MBB, I, DL, get(AArch64::ORR_PPzPP), DestReg)
3440       .addReg(SrcReg) // Pg
3441       .addReg(SrcReg)
3442       .addReg(SrcReg, getKillRegState(KillSrc));
3443     return;
3444   }
3445 
3446   // Copy a Z register by ORRing with itself.
3447   if (AArch64::ZPRRegClass.contains(DestReg) &&
3448       AArch64::ZPRRegClass.contains(SrcReg)) {
3449     assert(Subtarget.hasSVE() && "Unexpected SVE register.");
3450     BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ), DestReg)
3451       .addReg(SrcReg)
3452       .addReg(SrcReg, getKillRegState(KillSrc));
3453     return;
3454   }
3455 
3456   // Copy a Z register pair by copying the individual sub-registers.
3457   if (AArch64::ZPR2RegClass.contains(DestReg) &&
3458       AArch64::ZPR2RegClass.contains(SrcReg)) {
3459     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1};
3460     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
3461                      Indices);
3462     return;
3463   }
3464 
3465   // Copy a Z register triple by copying the individual sub-registers.
3466   if (AArch64::ZPR3RegClass.contains(DestReg) &&
3467       AArch64::ZPR3RegClass.contains(SrcReg)) {
3468     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1,
3469                                        AArch64::zsub2};
3470     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
3471                      Indices);
3472     return;
3473   }
3474 
3475   // Copy a Z register quad by copying the individual sub-registers.
3476   if (AArch64::ZPR4RegClass.contains(DestReg) &&
3477       AArch64::ZPR4RegClass.contains(SrcReg)) {
3478     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1,
3479                                        AArch64::zsub2, AArch64::zsub3};
3480     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
3481                      Indices);
3482     return;
3483   }
3484 
3485   if (AArch64::GPR64spRegClass.contains(DestReg) &&
3486       (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
3487     if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
3488       // If either operand is SP, expand to ADD #0.
3489       BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
3490           .addReg(SrcReg, getKillRegState(KillSrc))
3491           .addImm(0)
3492           .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
3493     } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroingGP()) {
3494       BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg)
3495           .addImm(0)
3496           .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
3497     } else {
3498       // Otherwise, expand to ORR XZR.
3499       BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
3500           .addReg(AArch64::XZR)
3501           .addReg(SrcReg, getKillRegState(KillSrc));
3502     }
3503     return;
3504   }
3505 
3506   // Copy a DDDD register quad by copying the individual sub-registers.
3507   if (AArch64::DDDDRegClass.contains(DestReg) &&
3508       AArch64::DDDDRegClass.contains(SrcReg)) {
3509     static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
3510                                        AArch64::dsub2, AArch64::dsub3};
3511     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
3512                      Indices);
3513     return;
3514   }
3515 
3516   // Copy a DDD register triple by copying the individual sub-registers.
3517   if (AArch64::DDDRegClass.contains(DestReg) &&
3518       AArch64::DDDRegClass.contains(SrcReg)) {
3519     static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
3520                                        AArch64::dsub2};
3521     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
3522                      Indices);
3523     return;
3524   }
3525 
3526   // Copy a DD register pair by copying the individual sub-registers.
3527   if (AArch64::DDRegClass.contains(DestReg) &&
3528       AArch64::DDRegClass.contains(SrcReg)) {
3529     static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1};
3530     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
3531                      Indices);
3532     return;
3533   }
3534 
3535   // Copy a QQQQ register quad by copying the individual sub-registers.
3536   if (AArch64::QQQQRegClass.contains(DestReg) &&
3537       AArch64::QQQQRegClass.contains(SrcReg)) {
3538     static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
3539                                        AArch64::qsub2, AArch64::qsub3};
3540     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
3541                      Indices);
3542     return;
3543   }
3544 
3545   // Copy a QQQ register triple by copying the individual sub-registers.
3546   if (AArch64::QQQRegClass.contains(DestReg) &&
3547       AArch64::QQQRegClass.contains(SrcReg)) {
3548     static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
3549                                        AArch64::qsub2};
3550     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
3551                      Indices);
3552     return;
3553   }
3554 
3555   // Copy a QQ register pair by copying the individual sub-registers.
3556   if (AArch64::QQRegClass.contains(DestReg) &&
3557       AArch64::QQRegClass.contains(SrcReg)) {
3558     static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1};
3559     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
3560                      Indices);
3561     return;
3562   }
3563 
3564   if (AArch64::XSeqPairsClassRegClass.contains(DestReg) &&
3565       AArch64::XSeqPairsClassRegClass.contains(SrcReg)) {
3566     static const unsigned Indices[] = {AArch64::sube64, AArch64::subo64};
3567     copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRXrs,
3568                     AArch64::XZR, Indices);
3569     return;
3570   }
3571 
3572   if (AArch64::WSeqPairsClassRegClass.contains(DestReg) &&
3573       AArch64::WSeqPairsClassRegClass.contains(SrcReg)) {
3574     static const unsigned Indices[] = {AArch64::sube32, AArch64::subo32};
3575     copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRWrs,
3576                     AArch64::WZR, Indices);
3577     return;
3578   }
3579 
3580   if (AArch64::FPR128RegClass.contains(DestReg) &&
3581       AArch64::FPR128RegClass.contains(SrcReg)) {
3582     if (Subtarget.hasNEON()) {
3583       BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
3584           .addReg(SrcReg)
3585           .addReg(SrcReg, getKillRegState(KillSrc));
3586     } else {
3587       BuildMI(MBB, I, DL, get(AArch64::STRQpre))
3588           .addReg(AArch64::SP, RegState::Define)
3589           .addReg(SrcReg, getKillRegState(KillSrc))
3590           .addReg(AArch64::SP)
3591           .addImm(-16);
3592       BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
3593           .addReg(AArch64::SP, RegState::Define)
3594           .addReg(DestReg, RegState::Define)
3595           .addReg(AArch64::SP)
3596           .addImm(16);
3597     }
3598     return;
3599   }
3600 
3601   if (AArch64::FPR64RegClass.contains(DestReg) &&
3602       AArch64::FPR64RegClass.contains(SrcReg)) {
3603     BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
3604         .addReg(SrcReg, getKillRegState(KillSrc));
3605     return;
3606   }
3607 
3608   if (AArch64::FPR32RegClass.contains(DestReg) &&
3609       AArch64::FPR32RegClass.contains(SrcReg)) {
3610     BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
3611         .addReg(SrcReg, getKillRegState(KillSrc));
3612     return;
3613   }
3614 
3615   if (AArch64::FPR16RegClass.contains(DestReg) &&
3616       AArch64::FPR16RegClass.contains(SrcReg)) {
3617     DestReg =
3618         RI.getMatchingSuperReg(DestReg, AArch64::hsub, &AArch64::FPR32RegClass);
3619     SrcReg =
3620         RI.getMatchingSuperReg(SrcReg, AArch64::hsub, &AArch64::FPR32RegClass);
3621     BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
3622         .addReg(SrcReg, getKillRegState(KillSrc));
3623     return;
3624   }
3625 
3626   if (AArch64::FPR8RegClass.contains(DestReg) &&
3627       AArch64::FPR8RegClass.contains(SrcReg)) {
3628     DestReg =
3629         RI.getMatchingSuperReg(DestReg, AArch64::bsub, &AArch64::FPR32RegClass);
3630     SrcReg =
3631         RI.getMatchingSuperReg(SrcReg, AArch64::bsub, &AArch64::FPR32RegClass);
3632     BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
3633         .addReg(SrcReg, getKillRegState(KillSrc));
3634     return;
3635   }
3636 
3637   // Copies between GPR64 and FPR64.
3638   if (AArch64::FPR64RegClass.contains(DestReg) &&
3639       AArch64::GPR64RegClass.contains(SrcReg)) {
3640     BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
3641         .addReg(SrcReg, getKillRegState(KillSrc));
3642     return;
3643   }
3644   if (AArch64::GPR64RegClass.contains(DestReg) &&
3645       AArch64::FPR64RegClass.contains(SrcReg)) {
3646     BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
3647         .addReg(SrcReg, getKillRegState(KillSrc));
3648     return;
3649   }
3650   // Copies between GPR32 and FPR32.
3651   if (AArch64::FPR32RegClass.contains(DestReg) &&
3652       AArch64::GPR32RegClass.contains(SrcReg)) {
3653     BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
3654         .addReg(SrcReg, getKillRegState(KillSrc));
3655     return;
3656   }
3657   if (AArch64::GPR32RegClass.contains(DestReg) &&
3658       AArch64::FPR32RegClass.contains(SrcReg)) {
3659     BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
3660         .addReg(SrcReg, getKillRegState(KillSrc));
3661     return;
3662   }
3663 
3664   if (DestReg == AArch64::NZCV) {
3665     assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
3666     BuildMI(MBB, I, DL, get(AArch64::MSR))
3667         .addImm(AArch64SysReg::NZCV)
3668         .addReg(SrcReg, getKillRegState(KillSrc))
3669         .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
3670     return;
3671   }
3672 
3673   if (SrcReg == AArch64::NZCV) {
3674     assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
3675     BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
3676         .addImm(AArch64SysReg::NZCV)
3677         .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
3678     return;
3679   }
3680 
3681 #ifndef NDEBUG
3682   const TargetRegisterInfo &TRI = getRegisterInfo();
3683   errs() << TRI.getRegAsmName(DestReg) << " = COPY "
3684          << TRI.getRegAsmName(SrcReg) << "\n";
3685 #endif
3686   llvm_unreachable("unimplemented reg-to-reg copy");
3687 }
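
// A few illustrative expansions of the cases handled above (registers are
// hypothetical examples):
//
//   wsp <- w1                    add  wsp, w1, #0   (the "mov wsp, w1" alias)
//   x0  <- xzr  (zero-cycle GP)  movz x0, #0
//   s0  <- s1                    fmov s0, s1
//   x0  <- NZCV                  mrs  x0, NZCV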
3688 
3689 static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
3690                                     MachineBasicBlock &MBB,
3691                                     MachineBasicBlock::iterator InsertBefore,
3692                                     const MCInstrDesc &MCID,
3693                                     Register SrcReg, bool IsKill,
3694                                     unsigned SubIdx0, unsigned SubIdx1, int FI,
3695                                     MachineMemOperand *MMO) {
3696   Register SrcReg0 = SrcReg;
3697   Register SrcReg1 = SrcReg;
3698   if (Register::isPhysicalRegister(SrcReg)) {
3699     SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0);
3700     SubIdx0 = 0;
3701     SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1);
3702     SubIdx1 = 0;
3703   }
3704   BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
3705       .addReg(SrcReg0, getKillRegState(IsKill), SubIdx0)
3706       .addReg(SrcReg1, getKillRegState(IsKill), SubIdx1)
3707       .addFrameIndex(FI)
3708       .addImm(0)
3709       .addMemOperand(MMO);
3710 }
3711 
3712 void AArch64InstrInfo::storeRegToStackSlot(
3713     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
3714     bool isKill, int FI, const TargetRegisterClass *RC,
3715     const TargetRegisterInfo *TRI) const {
3716   MachineFunction &MF = *MBB.getParent();
3717   MachineFrameInfo &MFI = MF.getFrameInfo();
3718 
3719   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
3720   MachineMemOperand *MMO =
3721       MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
3722                               MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
3723   unsigned Opc = 0;
3724   bool Offset = true;
3725   unsigned StackID = TargetStackID::Default;
3726   switch (TRI->getSpillSize(*RC)) {
3727   case 1:
3728     if (AArch64::FPR8RegClass.hasSubClassEq(RC))
3729       Opc = AArch64::STRBui;
3730     break;
3731   case 2:
3732     if (AArch64::FPR16RegClass.hasSubClassEq(RC))
3733       Opc = AArch64::STRHui;
3734     else if (AArch64::PPRRegClass.hasSubClassEq(RC)) {
3735       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3736       Opc = AArch64::STR_PXI;
3737       StackID = TargetStackID::ScalableVector;
3738     }
3739     break;
3740   case 4:
3741     if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
3742       Opc = AArch64::STRWui;
3743       if (Register::isVirtualRegister(SrcReg))
3744         MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
3745       else
3746         assert(SrcReg != AArch64::WSP);
3747     } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
3748       Opc = AArch64::STRSui;
3749     break;
3750   case 8:
3751     if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
3752       Opc = AArch64::STRXui;
3753       if (Register::isVirtualRegister(SrcReg))
3754         MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
3755       else
3756         assert(SrcReg != AArch64::SP);
3757     } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
3758       Opc = AArch64::STRDui;
3759     } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
3760       storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
3761                               get(AArch64::STPWi), SrcReg, isKill,
3762                               AArch64::sube32, AArch64::subo32, FI, MMO);
3763       return;
3764     }
3765     break;
3766   case 16:
3767     if (AArch64::FPR128RegClass.hasSubClassEq(RC))
3768       Opc = AArch64::STRQui;
3769     else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
3770       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3771       Opc = AArch64::ST1Twov1d;
3772       Offset = false;
3773     } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
3774       storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
3775                               get(AArch64::STPXi), SrcReg, isKill,
3776                               AArch64::sube64, AArch64::subo64, FI, MMO);
3777       return;
3778     } else if (AArch64::ZPRRegClass.hasSubClassEq(RC)) {
3779       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3780       Opc = AArch64::STR_ZXI;
3781       StackID = TargetStackID::ScalableVector;
3782     }
3783     break;
3784   case 24:
3785     if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
3786       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3787       Opc = AArch64::ST1Threev1d;
3788       Offset = false;
3789     }
3790     break;
3791   case 32:
3792     if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
3793       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3794       Opc = AArch64::ST1Fourv1d;
3795       Offset = false;
3796     } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
3797       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3798       Opc = AArch64::ST1Twov2d;
3799       Offset = false;
3800     } else if (AArch64::ZPR2RegClass.hasSubClassEq(RC)) {
3801       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3802       Opc = AArch64::STR_ZZXI;
3803       StackID = TargetStackID::ScalableVector;
3804     }
3805     break;
3806   case 48:
3807     if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
3808       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3809       Opc = AArch64::ST1Threev2d;
3810       Offset = false;
3811     } else if (AArch64::ZPR3RegClass.hasSubClassEq(RC)) {
3812       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3813       Opc = AArch64::STR_ZZZXI;
3814       StackID = TargetStackID::ScalableVector;
3815     }
3816     break;
3817   case 64:
3818     if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
3819       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3820       Opc = AArch64::ST1Fourv2d;
3821       Offset = false;
3822     } else if (AArch64::ZPR4RegClass.hasSubClassEq(RC)) {
3823       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3824       Opc = AArch64::STR_ZZZZXI;
3825       StackID = TargetStackID::ScalableVector;
3826     }
3827     break;
3828   }
3829   assert(Opc && "Unknown register class");
3830   MFI.setStackID(FI, StackID);
3831 
3832   const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
3833                                      .addReg(SrcReg, getKillRegState(isKill))
3834                                      .addFrameIndex(FI);
3835 
3836   if (Offset)
3837     MI.addImm(0);
3838   MI.addMemOperand(MMO);
3839 }
3840 
3841 static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
3842                                      MachineBasicBlock &MBB,
3843                                      MachineBasicBlock::iterator InsertBefore,
3844                                      const MCInstrDesc &MCID,
3845                                      Register DestReg, unsigned SubIdx0,
3846                                      unsigned SubIdx1, int FI,
3847                                      MachineMemOperand *MMO) {
3848   Register DestReg0 = DestReg;
3849   Register DestReg1 = DestReg;
3850   bool IsUndef = true;
3851   if (Register::isPhysicalRegister(DestReg)) {
3852     DestReg0 = TRI.getSubReg(DestReg, SubIdx0);
3853     SubIdx0 = 0;
3854     DestReg1 = TRI.getSubReg(DestReg, SubIdx1);
3855     SubIdx1 = 0;
3856     IsUndef = false;
3857   }
3858   BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
3859       .addReg(DestReg0, RegState::Define | getUndefRegState(IsUndef), SubIdx0)
3860       .addReg(DestReg1, RegState::Define | getUndefRegState(IsUndef), SubIdx1)
3861       .addFrameIndex(FI)
3862       .addImm(0)
3863       .addMemOperand(MMO);
3864 }
3865 
3866 void AArch64InstrInfo::loadRegFromStackSlot(
3867     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg,
3868     int FI, const TargetRegisterClass *RC,
3869     const TargetRegisterInfo *TRI) const {
3870   MachineFunction &MF = *MBB.getParent();
3871   MachineFrameInfo &MFI = MF.getFrameInfo();
3872   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
3873   MachineMemOperand *MMO =
3874       MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
3875                               MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
3876 
3877   unsigned Opc = 0;
3878   bool Offset = true;
3879   unsigned StackID = TargetStackID::Default;
3880   switch (TRI->getSpillSize(*RC)) {
3881   case 1:
3882     if (AArch64::FPR8RegClass.hasSubClassEq(RC))
3883       Opc = AArch64::LDRBui;
3884     break;
3885   case 2:
3886     if (AArch64::FPR16RegClass.hasSubClassEq(RC))
3887       Opc = AArch64::LDRHui;
3888     else if (AArch64::PPRRegClass.hasSubClassEq(RC)) {
3889       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
3890       Opc = AArch64::LDR_PXI;
3891       StackID = TargetStackID::ScalableVector;
3892     }
3893     break;
3894   case 4:
3895     if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
3896       Opc = AArch64::LDRWui;
3897       if (Register::isVirtualRegister(DestReg))
3898         MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
3899       else
3900         assert(DestReg != AArch64::WSP);
3901     } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
3902       Opc = AArch64::LDRSui;
3903     break;
3904   case 8:
3905     if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
3906       Opc = AArch64::LDRXui;
3907       if (Register::isVirtualRegister(DestReg))
3908         MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
3909       else
3910         assert(DestReg != AArch64::SP);
3911     } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
3912       Opc = AArch64::LDRDui;
3913     } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
3914       loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
3915                                get(AArch64::LDPWi), DestReg, AArch64::sube32,
3916                                AArch64::subo32, FI, MMO);
3917       return;
3918     }
3919     break;
3920   case 16:
3921     if (AArch64::FPR128RegClass.hasSubClassEq(RC))
3922       Opc = AArch64::LDRQui;
3923     else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
3924       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
3925       Opc = AArch64::LD1Twov1d;
3926       Offset = false;
3927     } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
3928       loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
3929                                get(AArch64::LDPXi), DestReg, AArch64::sube64,
3930                                AArch64::subo64, FI, MMO);
3931       return;
3932     } else if (AArch64::ZPRRegClass.hasSubClassEq(RC)) {
3933       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
3934       Opc = AArch64::LDR_ZXI;
3935       StackID = TargetStackID::ScalableVector;
3936     }
3937     break;
3938   case 24:
3939     if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
3940       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
3941       Opc = AArch64::LD1Threev1d;
3942       Offset = false;
3943     }
3944     break;
3945   case 32:
3946     if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
3947       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
3948       Opc = AArch64::LD1Fourv1d;
3949       Offset = false;
3950     } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
3951       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
3952       Opc = AArch64::LD1Twov2d;
3953       Offset = false;
3954     } else if (AArch64::ZPR2RegClass.hasSubClassEq(RC)) {
3955       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
3956       Opc = AArch64::LDR_ZZXI;
3957       StackID = TargetStackID::ScalableVector;
3958     }
3959     break;
3960   case 48:
3961     if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
3962       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
3963       Opc = AArch64::LD1Threev2d;
3964       Offset = false;
3965     } else if (AArch64::ZPR3RegClass.hasSubClassEq(RC)) {
3966       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
3967       Opc = AArch64::LDR_ZZZXI;
3968       StackID = TargetStackID::ScalableVector;
3969     }
3970     break;
3971   case 64:
3972     if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
3973       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
3974       Opc = AArch64::LD1Fourv2d;
3975       Offset = false;
3976     } else if (AArch64::ZPR4RegClass.hasSubClassEq(RC)) {
3977       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
3978       Opc = AArch64::LDR_ZZZZXI;
3979       StackID = TargetStackID::ScalableVector;
3980     }
3981     break;
3982   }
3983 
3984   assert(Opc && "Unknown register class");
3985   MFI.setStackID(FI, StackID);
3986 
3987   const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
3988                                      .addReg(DestReg, getDefRegState(true))
3989                                      .addFrameIndex(FI);
3990   if (Offset)
3991     MI.addImm(0);
3992   MI.addMemOperand(MMO);
3993 }
3994 
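// Returns true if any instruction strictly between DefMI and UseMI (both
// excluded, debug instructions ignored) reads or modifies NZCV.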
3995 bool llvm::isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
3996                                            const MachineInstr &UseMI,
3997                                            const TargetRegisterInfo *TRI) {
3998   return any_of(instructionsWithoutDebug(std::next(DefMI.getIterator()),
3999                                          UseMI.getIterator()),
4000                 [TRI](const MachineInstr &I) {
4001                   return I.modifiesRegister(AArch64::NZCV, TRI) ||
4002                          I.readsRegister(AArch64::NZCV, TRI);
4003                 });
4004 }
4005 
4006 void AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(
4007     const StackOffset &Offset, int64_t &ByteSized, int64_t &VGSized) {
4008   // The smallest scalable element supported by scaled SVE addressing
4009   // modes is a predicate, which is 2 scalable bytes in size. So the scalable
4010   // byte offset must always be a multiple of 2.
4011   assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
4012 
4013   // VGSized offsets are divided by '2', because the VG register is the
4014   // number of 64-bit granules, as opposed to 128-bit vector chunks,
4015   // which is how the 'n' in e.g. MVT::nxv1i8 is modelled.
4016   // So, for a stack offset of 16 MVT::nxv1i8's, the size is n x 16 bytes.
4017   // VG = n * 2 and the dwarf offset must be VG * 8 bytes.
4018   ByteSized = Offset.getFixed();
4019   VGSized = Offset.getScalable() / 2;
4020 }
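
// Worked example (values hypothetical): a StackOffset of 16 fixed bytes plus
// 16 scalable bytes (one full SVE data vector) decomposes into ByteSized == 16
// and VGSized == 8, matching the fact that one Z register occupies VG * 8
// bytes.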
4021 
4022 /// Returns, via the out-parameters, the parts into which this frame offset
4023 /// can be decomposed for the purpose of describing a frame offset.
4024 /// For a non-scalable offset this is simply its byte size.
4025 void AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
4026     const StackOffset &Offset, int64_t &NumBytes, int64_t &NumPredicateVectors,
4027     int64_t &NumDataVectors) {
4028   // The smallest scalable element supported by scaled SVE addressing
4029   // modes is a predicate, which is 2 scalable bytes in size. So the scalable
4030   // byte offset must always be a multiple of 2.
4031   assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
4032 
4033   NumBytes = Offset.getFixed();
4034   NumDataVectors = 0;
4035   NumPredicateVectors = Offset.getScalable() / 2;
4036   // This method is used to get the offsets to adjust the frame offset.
4037   // If the function requires ADDPL to be used and needs more than two ADDPL
4038   // instructions, part of the offset is folded into NumDataVectors so that it
4039   // uses ADDVL for part of it, reducing the number of ADDPL instructions.
4040   if (NumPredicateVectors % 8 == 0 || NumPredicateVectors < -64 ||
4041       NumPredicateVectors > 62) {
4042     NumDataVectors = NumPredicateVectors / 8;
4043     NumPredicateVectors -= NumDataVectors * 8;
4044   }
4045 }
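
// Worked examples (values hypothetical): a scalable offset of 6 bytes yields
// NumPredicateVectors == 3 (a single ADDPL), while a scalable offset of 16
// bytes yields 8 predicate increments, which fold into NumDataVectors == 1
// (a single ADDVL) with no ADDPL needed.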
4046 
4047 // Helper function to emit a frame offset adjustment from a given
4048 // pointer (SrcReg), storing the result into DestReg. This function is
4049 // explicit in that it requires the opcode to be passed in.
4050 static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
4051                                MachineBasicBlock::iterator MBBI,
4052                                const DebugLoc &DL, unsigned DestReg,
4053                                unsigned SrcReg, int64_t Offset, unsigned Opc,
4054                                const TargetInstrInfo *TII,
4055                                MachineInstr::MIFlag Flag, bool NeedsWinCFI,
4056                                bool *HasWinCFI) {
4057   int Sign = 1;
4058   unsigned MaxEncoding, ShiftSize;
4059   switch (Opc) {
4060   case AArch64::ADDXri:
4061   case AArch64::ADDSXri:
4062   case AArch64::SUBXri:
4063   case AArch64::SUBSXri:
4064     MaxEncoding = 0xfff;
4065     ShiftSize = 12;
4066     break;
4067   case AArch64::ADDVL_XXI:
4068   case AArch64::ADDPL_XXI:
4069     MaxEncoding = 31;
4070     ShiftSize = 0;
4071     if (Offset < 0) {
4072       MaxEncoding = 32;
4073       Sign = -1;
4074       Offset = -Offset;
4075     }
4076     break;
4077   default:
4078     llvm_unreachable("Unsupported opcode");
4079   }
4080 
4081   // FIXME: If the offset won't fit in 24-bits, compute the offset into a
4082   // scratch register.  If DestReg is a virtual register, use it as the
4083   // scratch register; otherwise, create a new virtual register (to be
4084   // replaced by the scavenger at the end of PEI).  That case can be optimized
4085   // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
4086   // register can be loaded with offset%8 and the add/sub can use an extending
4087   // instruction with LSL#3.
4088   // Currently the function handles any offset, but it may generate a poor
4089   // sequence of code.
4090   //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
4091 
4092   const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
4093   Register TmpReg = DestReg;
4094   if (TmpReg == AArch64::XZR)
4095     TmpReg = MBB.getParent()->getRegInfo().createVirtualRegister(
4096         &AArch64::GPR64RegClass);
4097   do {
4098     uint64_t ThisVal = std::min<uint64_t>(Offset, MaxEncodableValue);
4099     unsigned LocalShiftSize = 0;
4100     if (ThisVal > MaxEncoding) {
4101       ThisVal = ThisVal >> ShiftSize;
4102       LocalShiftSize = ShiftSize;
4103     }
4104     assert((ThisVal >> ShiftSize) <= MaxEncoding &&
4105            "Encoding cannot handle value that big");
4106 
4107     Offset -= ThisVal << LocalShiftSize;
4108     if (Offset == 0)
4109       TmpReg = DestReg;
4110     auto MBI = BuildMI(MBB, MBBI, DL, TII->get(Opc), TmpReg)
4111                    .addReg(SrcReg)
4112                    .addImm(Sign * (int)ThisVal);
4113     if (ShiftSize)
4114       MBI = MBI.addImm(
4115           AArch64_AM::getShifterImm(AArch64_AM::LSL, LocalShiftSize));
4116     MBI = MBI.setMIFlag(Flag);
4117 
4118     if (NeedsWinCFI) {
4119       assert(Sign == 1 && "SEH directives should always have a positive sign");
4120       int Imm = (int)(ThisVal << LocalShiftSize);
4121       if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) ||
4122           (SrcReg == AArch64::FP && DestReg == AArch64::SP)) {
4123         if (HasWinCFI)
4124           *HasWinCFI = true;
4125         if (Imm == 0)
4126           BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_SetFP)).setMIFlag(Flag);
4127         else
4128           BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_AddFP))
4129               .addImm(Imm)
4130               .setMIFlag(Flag);
4131         assert(Offset == 0 && "Expected remaining offset to be zero to "
4132                               "emit a single SEH directive");
4133       } else if (DestReg == AArch64::SP) {
4134         if (HasWinCFI)
4135           *HasWinCFI = true;
4136         assert(SrcReg == AArch64::SP && "Unexpected SrcReg for SEH_StackAlloc");
4137         BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
4138             .addImm(Imm)
4139             .setMIFlag(Flag);
4140       }
4141       if (HasWinCFI)
4142         *HasWinCFI = true;
4143     }
4144 
4145     SrcReg = TmpReg;
4146   } while (Offset);
4147 }
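
// Illustrative trace of the loop above (values hypothetical): adjusting by
// 4100 bytes with Opc == AArch64::ADDXri takes two iterations, since 4100
// does not fit the 12-bit immediate but splits cleanly:
//
//   add x0, sp, #1, lsl #12    // ThisVal 4100 > 0xfff, emitted shifted
//   add x0, x0, #4             // remaining offset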
4148 
4149 void llvm::emitFrameOffset(MachineBasicBlock &MBB,
4150                            MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
4151                            unsigned DestReg, unsigned SrcReg,
4152                            StackOffset Offset, const TargetInstrInfo *TII,
4153                            MachineInstr::MIFlag Flag, bool SetNZCV,
4154                            bool NeedsWinCFI, bool *HasWinCFI) {
4155   int64_t Bytes, NumPredicateVectors, NumDataVectors;
4156   AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
4157       Offset, Bytes, NumPredicateVectors, NumDataVectors);
4158 
4159   // First emit non-scalable frame offsets, or a simple 'mov'.
4160   if (Bytes || (!Offset && SrcReg != DestReg)) {
4161     assert((DestReg != AArch64::SP || Bytes % 8 == 0) &&
4162            "SP increment/decrement not 8-byte aligned");
4163     unsigned Opc = SetNZCV ? AArch64::ADDSXri : AArch64::ADDXri;
4164     if (Bytes < 0) {
4165       Bytes = -Bytes;
4166       Opc = SetNZCV ? AArch64::SUBSXri : AArch64::SUBXri;
4167     }
4168     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, Bytes, Opc, TII, Flag,
4169                        NeedsWinCFI, HasWinCFI);
4170     SrcReg = DestReg;
4171   }
4172 
4173   assert(!(SetNZCV && (NumPredicateVectors || NumDataVectors)) &&
4174          "SetNZCV not supported with SVE vectors");
4175   assert(!(NeedsWinCFI && (NumPredicateVectors || NumDataVectors)) &&
4176          "WinCFI not supported with SVE vectors");
4177 
4178   if (NumDataVectors) {
4179     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumDataVectors,
4180                        AArch64::ADDVL_XXI, TII, Flag, NeedsWinCFI, nullptr);
4181     SrcReg = DestReg;
4182   }
4183 
4184   if (NumPredicateVectors) {
4185     assert(DestReg != AArch64::SP && "Unaligned access to SP");
4186     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumPredicateVectors,
4187                        AArch64::ADDPL_XXI, TII, Flag, NeedsWinCFI, nullptr);
4188   }
4189 }
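
// Illustrative end-to-end example (registers and values hypothetical):
// materializing DestReg = SrcReg + (16 fixed + 16 scalable) bytes decomposes
// into 16 bytes plus one data vector and emits
//
//   add   x0, x1, #16
//   addvl x0, x0, #1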
4190 
4191 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
4192     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
4193     MachineBasicBlock::iterator InsertPt, int FrameIndex,
4194     LiveIntervals *LIS, VirtRegMap *VRM) const {
4195   // This is a bit of a hack. Consider this instruction:
4196   //
4197   //   %0 = COPY %sp; GPR64all:%0
4198   //
4199   // We explicitly chose GPR64all for the virtual register so such a copy might
4200   // be eliminated by RegisterCoalescer. However, that may not be possible, and
4201   // %0 may even spill. We can't spill %sp, and since it is in the GPR64all
4202   // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
4203   //
4204   // To prevent that, we are going to constrain the %0 register class here.
4205   //
4206   // <rdar://problem/11522048>
4207   //
4208   if (MI.isFullCopy()) {
4209     Register DstReg = MI.getOperand(0).getReg();
4210     Register SrcReg = MI.getOperand(1).getReg();
4211     if (SrcReg == AArch64::SP && Register::isVirtualRegister(DstReg)) {
4212       MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
4213       return nullptr;
4214     }
4215     if (DstReg == AArch64::SP && Register::isVirtualRegister(SrcReg)) {
4216       MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
4217       return nullptr;
4218     }
4219   }
4220 
4221   // Handle the case where a copy is being spilled or filled but the source
4222   // and destination register classes don't match.  For example:
4223   //
4224   //   %0 = COPY %xzr; GPR64common:%0
4225   //
4226   // In this case we can still safely fold away the COPY and generate the
4227   // following spill code:
4228   //
4229   //   STRXui %xzr, %stack.0
4230   //
4231   // This also eliminates spilled cross register class COPYs (e.g. between x and
4232   // d regs) of the same size.  For example:
4233   //
4234   //   %0 = COPY %1; GPR64:%0, FPR64:%1
4235   //
4236   // will be filled as
4237   //
4238   //   LDRDui %0, fi<#0>
4239   //
4240   // instead of
4241   //
4242   //   LDRXui %Temp, fi<#0>
4243   //   %0 = FMOV %Temp
4244   //
4245   if (MI.isCopy() && Ops.size() == 1 &&
4246       // Make sure we're only folding the explicit COPY defs/uses.
4247       (Ops[0] == 0 || Ops[0] == 1)) {
4248     bool IsSpill = Ops[0] == 0;
4249     bool IsFill = !IsSpill;
4250     const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
4251     const MachineRegisterInfo &MRI = MF.getRegInfo();
4252     MachineBasicBlock &MBB = *MI.getParent();
4253     const MachineOperand &DstMO = MI.getOperand(0);
4254     const MachineOperand &SrcMO = MI.getOperand(1);
4255     Register DstReg = DstMO.getReg();
4256     Register SrcReg = SrcMO.getReg();
4257     // This is slightly expensive to compute for physical regs since
4258     // getMinimalPhysRegClass is slow.
4259     auto getRegClass = [&](unsigned Reg) {
4260       return Register::isVirtualRegister(Reg) ? MRI.getRegClass(Reg)
4261                                               : TRI.getMinimalPhysRegClass(Reg);
4262     };
4263 
4264     if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
4265       assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
4266                  TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
4267              "Mismatched register size in non subreg COPY");
4268       if (IsSpill)
4269         storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
4270                             getRegClass(SrcReg), &TRI);
4271       else
4272         loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
4273                              getRegClass(DstReg), &TRI);
4274       return &*--InsertPt;
4275     }
4276 
4277     // Handle cases like spilling def of:
4278     //
4279     //   %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
4280     //
4281     // where the physical register source can be widened and stored to the full
4282     // virtual reg destination stack slot, in this case producing:
4283     //
4284     //   STRXui %xzr, %stack.0
4285     //
4286     if (IsSpill && DstMO.isUndef() && Register::isPhysicalRegister(SrcReg)) {
4287       assert(SrcMO.getSubReg() == 0 &&
4288              "Unexpected subreg on physical register");
4289       const TargetRegisterClass *SpillRC;
4290       unsigned SpillSubreg;
4291       switch (DstMO.getSubReg()) {
4292       default:
4293         SpillRC = nullptr;
4294         break;
4295       case AArch64::sub_32:
4296       case AArch64::ssub:
4297         if (AArch64::GPR32RegClass.contains(SrcReg)) {
4298           SpillRC = &AArch64::GPR64RegClass;
4299           SpillSubreg = AArch64::sub_32;
4300         } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
4301           SpillRC = &AArch64::FPR64RegClass;
4302           SpillSubreg = AArch64::ssub;
4303         } else
4304           SpillRC = nullptr;
4305         break;
4306       case AArch64::dsub:
4307         if (AArch64::FPR64RegClass.contains(SrcReg)) {
4308           SpillRC = &AArch64::FPR128RegClass;
4309           SpillSubreg = AArch64::dsub;
4310         } else
4311           SpillRC = nullptr;
4312         break;
4313       }
4314 
4315       if (SpillRC)
4316         if (unsigned WidenedSrcReg =
4317                 TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
4318           storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
4319                               FrameIndex, SpillRC, &TRI);
4320           return &*--InsertPt;
4321         }
4322     }
4323 
4324     // Handle cases like filling use of:
4325     //
4326     //   %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
4327     //
4328     // where we can load the full virtual reg source stack slot, into the subreg
4329     // destination, in this case producing:
4330     //
4331     //   LDRWui %0:sub_32<def,read-undef>, %stack.0
4332     //
4333     if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
4334       const TargetRegisterClass *FillRC;
4335       switch (DstMO.getSubReg()) {
4336       default:
4337         FillRC = nullptr;
4338         break;
4339       case AArch64::sub_32:
4340         FillRC = &AArch64::GPR32RegClass;
4341         break;
4342       case AArch64::ssub:
4343         FillRC = &AArch64::FPR32RegClass;
4344         break;
4345       case AArch64::dsub:
4346         FillRC = &AArch64::FPR64RegClass;
4347         break;
4348       }
4349 
4350       if (FillRC) {
4351         assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
4352                    TRI.getRegSizeInBits(*FillRC) &&
4353                "Mismatched regclass size on folded subreg COPY");
4354         loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
4355         MachineInstr &LoadMI = *--InsertPt;
4356         MachineOperand &LoadDst = LoadMI.getOperand(0);
4357         assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
4358         LoadDst.setSubReg(DstMO.getSubReg());
4359         LoadDst.setIsUndef();
4360         return &LoadMI;
4361       }
4362     }
4363   }
4364 
4365   // Cannot fold.
4366   return nullptr;
4367 }
4368 
4369 int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI,
4370                                     StackOffset &SOffset,
4371                                     bool *OutUseUnscaledOp,
4372                                     unsigned *OutUnscaledOp,
4373                                     int64_t *EmittableOffset) {
4374   // Set output values in case of early exit.
4375   if (EmittableOffset)
4376     *EmittableOffset = 0;
4377   if (OutUseUnscaledOp)
4378     *OutUseUnscaledOp = false;
4379   if (OutUnscaledOp)
4380     *OutUnscaledOp = 0;
4381 
4382   // Exit early for structured vector spills/fills as they can't take an
4383   // immediate offset.
4384   switch (MI.getOpcode()) {
4385   default:
4386     break;
4387   case AArch64::LD1Twov2d:
4388   case AArch64::LD1Threev2d:
4389   case AArch64::LD1Fourv2d:
4390   case AArch64::LD1Twov1d:
4391   case AArch64::LD1Threev1d:
4392   case AArch64::LD1Fourv1d:
4393   case AArch64::ST1Twov2d:
4394   case AArch64::ST1Threev2d:
4395   case AArch64::ST1Fourv2d:
4396   case AArch64::ST1Twov1d:
4397   case AArch64::ST1Threev1d:
4398   case AArch64::ST1Fourv1d:
4399   case AArch64::ST1i8:
4400   case AArch64::ST1i16:
4401   case AArch64::ST1i32:
4402   case AArch64::ST1i64:
4403   case AArch64::IRG:
4404   case AArch64::IRGstack:
4405   case AArch64::STGloop:
4406   case AArch64::STZGloop:
4407     return AArch64FrameOffsetCannotUpdate;
4408   }
4409 
4410   // Get the min/max offset and the scale.
4411   TypeSize ScaleValue(0U, false);
4412   unsigned Width;
4413   int64_t MinOff, MaxOff;
4414   if (!AArch64InstrInfo::getMemOpInfo(MI.getOpcode(), ScaleValue, Width, MinOff,
4415                                       MaxOff))
4416     llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
4417 
4418   // Construct the complete offset.
4419   bool IsMulVL = ScaleValue.isScalable();
4420   unsigned Scale = ScaleValue.getKnownMinSize();
4421   int64_t Offset = IsMulVL ? SOffset.getScalable() : SOffset.getFixed();
4422 
4423   const MachineOperand &ImmOpnd =
4424       MI.getOperand(AArch64InstrInfo::getLoadStoreImmIdx(MI.getOpcode()));
4425   Offset += ImmOpnd.getImm() * Scale;
4426 
4427   // If the offset doesn't match the scale, we rewrite the instruction to
4428   // use the unscaled instruction instead. We do the same if we have a
4429   // negative offset and an unscaled op is available.
4430   Optional<unsigned> UnscaledOp =
4431       AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode());
4432   bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0);
4433   if (useUnscaledOp &&
4434       !AArch64InstrInfo::getMemOpInfo(*UnscaledOp, ScaleValue, Width, MinOff,
4435                                       MaxOff))
4436     llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
4437 
4438   Scale = ScaleValue.getKnownMinSize();
4439   assert(IsMulVL == ScaleValue.isScalable() &&
4440          "Unscaled opcode has different value for scalable");
4441 
4442   int64_t Remainder = Offset % Scale;
4443   assert(!(Remainder && useUnscaledOp) &&
4444          "Cannot have remainder when using unscaled op");
4445 
4446   assert(MinOff < MaxOff && "Unexpected Min/Max offsets");
4447   int64_t NewOffset = Offset / Scale;
4448   if (MinOff <= NewOffset && NewOffset <= MaxOff)
4449     Offset = Remainder;
4450   else {
4451     NewOffset = NewOffset < 0 ? MinOff : MaxOff;
4452     Offset = Offset - NewOffset * Scale + Remainder;
4453   }
4454 
4455   if (EmittableOffset)
4456     *EmittableOffset = NewOffset;
4457   if (OutUseUnscaledOp)
4458     *OutUseUnscaledOp = useUnscaledOp;
4459   if (OutUnscaledOp && UnscaledOp)
4460     *OutUnscaledOp = *UnscaledOp;
4461 
4462   if (IsMulVL)
4463     SOffset = StackOffset::get(SOffset.getFixed(), Offset);
4464   else
4465     SOffset = StackOffset::get(Offset, SOffset.getScalable());
4466   return AArch64FrameOffsetCanUpdate |
4467          (SOffset ? 0 : AArch64FrameOffsetIsLegal);
4468 }
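
// Worked example (values hypothetical): for an AArch64::LDRXui (scale 8,
// emittable range [0, 4095]) whose immediate operand is 0 and whose stack
// offset resolves to 4104 fixed bytes with no scalable part, 4104 / 8 == 513
// is in range, so *EmittableOffset becomes 513, SOffset is reduced to zero,
// and the result is AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal.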
4469 
4470 bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
4471                                     unsigned FrameReg, StackOffset &Offset,
4472                                     const AArch64InstrInfo *TII) {
4473   unsigned Opcode = MI.getOpcode();
4474   unsigned ImmIdx = FrameRegIdx + 1;
4475 
4476   if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
4477     Offset += StackOffset::getFixed(MI.getOperand(ImmIdx).getImm());
4478     emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
4479                     MI.getOperand(0).getReg(), FrameReg, Offset, TII,
4480                     MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
4481     MI.eraseFromParent();
4482     Offset = StackOffset();
4483     return true;
4484   }
4485 
4486   int64_t NewOffset;
4487   unsigned UnscaledOp;
4488   bool UseUnscaledOp;
4489   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
4490                                          &UnscaledOp, &NewOffset);
4491   if (Status & AArch64FrameOffsetCanUpdate) {
4492     if (Status & AArch64FrameOffsetIsLegal)
4493       // Replace the FrameIndex with FrameReg.
4494       MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
4495     if (UseUnscaledOp)
4496       MI.setDesc(TII->get(UnscaledOp));
4497 
4498     MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
4499     return !Offset;
4500   }
4501 
4502   return false;
4503 }
4504 
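// NOP is an alias of HINT #0 on AArch64, so building HINT with immediate 0
// yields the canonical 4-byte no-op.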
4505 MCInst AArch64InstrInfo::getNop() const {
4506   return MCInstBuilder(AArch64::HINT).addImm(0);
4507 }
4508 
4509 // AArch64 supports MachineCombiner.
4510 bool AArch64InstrInfo::useMachineCombiner() const { return true; }
4511 
4512 // True when Opc sets flag
4513 static bool isCombineInstrSettingFlag(unsigned Opc) {
4514   switch (Opc) {
4515   case AArch64::ADDSWrr:
4516   case AArch64::ADDSWri:
4517   case AArch64::ADDSXrr:
4518   case AArch64::ADDSXri:
4519   case AArch64::SUBSWrr:
4520   case AArch64::SUBSXrr:
4521   // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
4522   case AArch64::SUBSWri:
4523   case AArch64::SUBSXri:
4524     return true;
4525   default:
4526     break;
4527   }
4528   return false;
4529 }
4530 
4531 // 32b Opcodes that can be combined with a MUL
4532 static bool isCombineInstrCandidate32(unsigned Opc) {
4533   switch (Opc) {
4534   case AArch64::ADDWrr:
4535   case AArch64::ADDWri:
4536   case AArch64::SUBWrr:
4537   case AArch64::ADDSWrr:
4538   case AArch64::ADDSWri:
4539   case AArch64::SUBSWrr:
4540   // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
4541   case AArch64::SUBWri:
4542   case AArch64::SUBSWri:
4543     return true;
4544   default:
4545     break;
4546   }
4547   return false;
4548 }
4549 
4550 // 64b Opcodes that can be combined with a MUL
4551 static bool isCombineInstrCandidate64(unsigned Opc) {
4552   switch (Opc) {
4553   case AArch64::ADDXrr:
4554   case AArch64::ADDXri:
4555   case AArch64::SUBXrr:
4556   case AArch64::ADDSXrr:
4557   case AArch64::ADDSXri:
4558   case AArch64::SUBSXrr:
4559   // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
4560   case AArch64::SUBXri:
4561   case AArch64::SUBSXri:
4562   case AArch64::ADDv8i8:
4563   case AArch64::ADDv16i8:
4564   case AArch64::ADDv4i16:
4565   case AArch64::ADDv8i16:
4566   case AArch64::ADDv2i32:
4567   case AArch64::ADDv4i32:
4568   case AArch64::SUBv8i8:
4569   case AArch64::SUBv16i8:
4570   case AArch64::SUBv4i16:
4571   case AArch64::SUBv8i16:
4572   case AArch64::SUBv2i32:
4573   case AArch64::SUBv4i32:
4574     return true;
4575   default:
4576     break;
4577   }
4578   return false;
4579 }
4580 
4581 // FP Opcodes that can be combined with a FMUL.
4582 static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
4583   switch (Inst.getOpcode()) {
4584   default:
4585     break;
4586   case AArch64::FADDHrr:
4587   case AArch64::FADDSrr:
4588   case AArch64::FADDDrr:
4589   case AArch64::FADDv4f16:
4590   case AArch64::FADDv8f16:
4591   case AArch64::FADDv2f32:
4592   case AArch64::FADDv2f64:
4593   case AArch64::FADDv4f32:
4594   case AArch64::FSUBHrr:
4595   case AArch64::FSUBSrr:
4596   case AArch64::FSUBDrr:
4597   case AArch64::FSUBv4f16:
4598   case AArch64::FSUBv8f16:
4599   case AArch64::FSUBv2f32:
4600   case AArch64::FSUBv2f64:
4601   case AArch64::FSUBv4f32:
4602     TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
4603     // We can fuse FADD/FSUB with FMUL, if fusion is either allowed globally by
4604     // the target options or if FADD/FSUB has the contract fast-math flag.
4605     return Options.UnsafeFPMath ||
4606            Options.AllowFPOpFusion == FPOpFusion::Fast ||
4607            Inst.getFlag(MachineInstr::FmContract);
4609   }
4610   return false;
4611 }
4612 
4613 // Opcodes that can be combined with a MUL
4614 static bool isCombineInstrCandidate(unsigned Opc) {
4615   return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
4616 }
4617 
4618 //
4619 // Utility routine that checks if \p MO is defined by a
4620 // \p CombineOpc instruction in the basic block \p MBB.
4621 static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
4622                        unsigned CombineOpc, unsigned ZeroReg = 0,
4623                        bool CheckZeroReg = false) {
4624   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4625   MachineInstr *MI = nullptr;
4626 
4627   if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
4628     MI = MRI.getUniqueVRegDef(MO.getReg());
4629   // And it needs to be in the trace (otherwise, it won't have a depth).
4630   if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
4631     return false;
4632   // Must only be used by the user we combine with.
4633   if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
4634     return false;
4635 
4636   if (CheckZeroReg) {
4637     assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
4638            MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
4639            MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
4640     // The third input reg must be zero.
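    // (A plain MUL is selected as MADD with WZR/XZR as the addend, so a
    // zero register here is what identifies a multiply with no accumulate.)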
4641     if (MI->getOperand(3).getReg() != ZeroReg)
4642       return false;
4643   }
4644 
4645   return true;
4646 }
4647 
4648 //
4649 // Is \p MO defined by an integer multiply, and can it be combined?
4650 static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
4651                               unsigned MulOpc, unsigned ZeroReg) {
4652   return canCombine(MBB, MO, MulOpc, ZeroReg, true);
4653 }
4654 
4655 //
4656 // Is \p MO defined by a floating-point multiply, and can it be combined?
4657 static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
4658                                unsigned MulOpc) {
4659   return canCombine(MBB, MO, MulOpc);
4660 }
4661 
4662 // TODO: There are many more machine instruction opcodes to match:
4663 //       1. Other data types (integer, vectors)
4664 //       2. Other math / logic operations (xor, or)
4665 //       3. Other forms of the same operation (intrinsics and other variants)
4666 bool AArch64InstrInfo::isAssociativeAndCommutative(
4667     const MachineInstr &Inst) const {
4668   switch (Inst.getOpcode()) {
4669   case AArch64::FADDDrr:
4670   case AArch64::FADDSrr:
4671   case AArch64::FADDv2f32:
4672   case AArch64::FADDv2f64:
4673   case AArch64::FADDv4f32:
4674   case AArch64::FMULDrr:
4675   case AArch64::FMULSrr:
4676   case AArch64::FMULX32:
4677   case AArch64::FMULX64:
4678   case AArch64::FMULXv2f32:
4679   case AArch64::FMULXv2f64:
4680   case AArch64::FMULXv4f32:
4681   case AArch64::FMULv2f32:
4682   case AArch64::FMULv2f64:
4683   case AArch64::FMULv4f32:
4684     return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
4685   default:
4686     return false;
4687   }
4688 }
4689 
4690 /// Find instructions that can be turned into madd.
4691 static bool getMaddPatterns(MachineInstr &Root,
4692                             SmallVectorImpl<MachineCombinerPattern> &Patterns) {
4693   unsigned Opc = Root.getOpcode();
4694   MachineBasicBlock &MBB = *Root.getParent();
4695   bool Found = false;
4696 
4697   if (!isCombineInstrCandidate(Opc))
4698     return false;
4699   if (isCombineInstrSettingFlag(Opc)) {
4700     int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
4701     // When NZCV is live, bail out.
4702     if (Cmp_NZCV == -1)
4703       return false;
4704     unsigned NewOpc = convertToNonFlagSettingOpc(Root);
4705     // When the opcode can't change, bail out.
4706     // CHECKME: do we miss any cases for opcode conversion?
4707     if (NewOpc == Opc)
4708       return false;
4709     Opc = NewOpc;
4710   }
4711 
4712   auto setFound = [&](int Opcode, int Operand, unsigned ZeroReg,
4713                       MachineCombinerPattern Pattern) {
4714     if (canCombineWithMUL(MBB, Root.getOperand(Operand), Opcode, ZeroReg)) {
4715       Patterns.push_back(Pattern);
4716       Found = true;
4717     }
4718   };
4719 
4720   auto setVFound = [&](int Opcode, int Operand, MachineCombinerPattern Pattern) {
4721     if (canCombine(MBB, Root.getOperand(Operand), Opcode)) {
4722       Patterns.push_back(Pattern);
4723       Found = true;
4724     }
4725   };
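  // Illustrative MIR for the _OP1 forms below, e.g. MULADDW_OP1:
  //   %3:gpr32 = MADDWrrr %1, %2, $wzr   ; plain 32-bit multiply
  //   %4:gpr32 = ADDWrr %3, %0           ; operand 1 is the MUL result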
4726 
4727   typedef MachineCombinerPattern MCP;
4728 
4729   switch (Opc) {
4730   default:
4731     break;
4732   case AArch64::ADDWrr:
4733     assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
4734            "ADDWrr does not have register operands");
4735     setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULADDW_OP1);
4736     setFound(AArch64::MADDWrrr, 2, AArch64::WZR, MCP::MULADDW_OP2);
4737     break;
4738   case AArch64::ADDXrr:
4739     setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULADDX_OP1);
4740     setFound(AArch64::MADDXrrr, 2, AArch64::XZR, MCP::MULADDX_OP2);
4741     break;
4742   case AArch64::SUBWrr:
4743     setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULSUBW_OP1);
4744     setFound(AArch64::MADDWrrr, 2, AArch64::WZR, MCP::MULSUBW_OP2);
4745     break;
4746   case AArch64::SUBXrr:
4747     setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULSUBX_OP1);
4748     setFound(AArch64::MADDXrrr, 2, AArch64::XZR, MCP::MULSUBX_OP2);
4749     break;
4750   case AArch64::ADDWri:
4751     setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULADDWI_OP1);
4752     break;
4753   case AArch64::ADDXri:
4754     setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULADDXI_OP1);
4755     break;
4756   case AArch64::SUBWri:
4757     setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULSUBWI_OP1);
4758     break;
4759   case AArch64::SUBXri:
4760     setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULSUBXI_OP1);
4761     break;
4762   case AArch64::ADDv8i8:
4763     setVFound(AArch64::MULv8i8, 1, MCP::MULADDv8i8_OP1);
4764     setVFound(AArch64::MULv8i8, 2, MCP::MULADDv8i8_OP2);
4765     break;
4766   case AArch64::ADDv16i8:
4767     setVFound(AArch64::MULv16i8, 1, MCP::MULADDv16i8_OP1);
4768     setVFound(AArch64::MULv16i8, 2, MCP::MULADDv16i8_OP2);
4769     break;
4770   case AArch64::ADDv4i16:
4771     setVFound(AArch64::MULv4i16, 1, MCP::MULADDv4i16_OP1);
4772     setVFound(AArch64::MULv4i16, 2, MCP::MULADDv4i16_OP2);
4773     setVFound(AArch64::MULv4i16_indexed, 1, MCP::MULADDv4i16_indexed_OP1);
4774     setVFound(AArch64::MULv4i16_indexed, 2, MCP::MULADDv4i16_indexed_OP2);
4775     break;
4776   case AArch64::ADDv8i16:
4777     setVFound(AArch64::MULv8i16, 1, MCP::MULADDv8i16_OP1);
4778     setVFound(AArch64::MULv8i16, 2, MCP::MULADDv8i16_OP2);
4779     setVFound(AArch64::MULv8i16_indexed, 1, MCP::MULADDv8i16_indexed_OP1);
4780     setVFound(AArch64::MULv8i16_indexed, 2, MCP::MULADDv8i16_indexed_OP2);
4781     break;
4782   case AArch64::ADDv2i32:
4783     setVFound(AArch64::MULv2i32, 1, MCP::MULADDv2i32_OP1);
4784     setVFound(AArch64::MULv2i32, 2, MCP::MULADDv2i32_OP2);
4785     setVFound(AArch64::MULv2i32_indexed, 1, MCP::MULADDv2i32_indexed_OP1);
4786     setVFound(AArch64::MULv2i32_indexed, 2, MCP::MULADDv2i32_indexed_OP2);
4787     break;
4788   case AArch64::ADDv4i32:
4789     setVFound(AArch64::MULv4i32, 1, MCP::MULADDv4i32_OP1);
4790     setVFound(AArch64::MULv4i32, 2, MCP::MULADDv4i32_OP2);
4791     setVFound(AArch64::MULv4i32_indexed, 1, MCP::MULADDv4i32_indexed_OP1);
4792     setVFound(AArch64::MULv4i32_indexed, 2, MCP::MULADDv4i32_indexed_OP2);
4793     break;
4794   case AArch64::SUBv8i8:
4795     setVFound(AArch64::MULv8i8, 1, MCP::MULSUBv8i8_OP1);
4796     setVFound(AArch64::MULv8i8, 2, MCP::MULSUBv8i8_OP2);
4797     break;
4798   case AArch64::SUBv16i8:
4799     setVFound(AArch64::MULv16i8, 1, MCP::MULSUBv16i8_OP1);
4800     setVFound(AArch64::MULv16i8, 2, MCP::MULSUBv16i8_OP2);
4801     break;
4802   case AArch64::SUBv4i16:
4803     setVFound(AArch64::MULv4i16, 1, MCP::MULSUBv4i16_OP1);
4804     setVFound(AArch64::MULv4i16, 2, MCP::MULSUBv4i16_OP2);
4805     setVFound(AArch64::MULv4i16_indexed, 1, MCP::MULSUBv4i16_indexed_OP1);
4806     setVFound(AArch64::MULv4i16_indexed, 2, MCP::MULSUBv4i16_indexed_OP2);
4807     break;
4808   case AArch64::SUBv8i16:
4809     setVFound(AArch64::MULv8i16, 1, MCP::MULSUBv8i16_OP1);
4810     setVFound(AArch64::MULv8i16, 2, MCP::MULSUBv8i16_OP2);
4811     setVFound(AArch64::MULv8i16_indexed, 1, MCP::MULSUBv8i16_indexed_OP1);
4812     setVFound(AArch64::MULv8i16_indexed, 2, MCP::MULSUBv8i16_indexed_OP2);
4813     break;
4814   case AArch64::SUBv2i32:
4815     setVFound(AArch64::MULv2i32, 1, MCP::MULSUBv2i32_OP1);
4816     setVFound(AArch64::MULv2i32, 2, MCP::MULSUBv2i32_OP2);
4817     setVFound(AArch64::MULv2i32_indexed, 1, MCP::MULSUBv2i32_indexed_OP1);
4818     setVFound(AArch64::MULv2i32_indexed, 2, MCP::MULSUBv2i32_indexed_OP2);
4819     break;
4820   case AArch64::SUBv4i32:
4821     setVFound(AArch64::MULv4i32, 1, MCP::MULSUBv4i32_OP1);
4822     setVFound(AArch64::MULv4i32, 2, MCP::MULSUBv4i32_OP2);
4823     setVFound(AArch64::MULv4i32_indexed, 1, MCP::MULSUBv4i32_indexed_OP1);
4824     setVFound(AArch64::MULv4i32_indexed, 2, MCP::MULSUBv4i32_indexed_OP2);
4825     break;
4826   }
4827   return Found;
4828 }
4829 /// Floating-Point Support
4830 
4831 /// Find instructions that can be turned into an fmadd.
4832 static bool getFMAPatterns(MachineInstr &Root,
4833                            SmallVectorImpl<MachineCombinerPattern> &Patterns) {
4834 
4835   if (!isCombineInstrCandidateFP(Root))
4836     return false;
4837 
4838   MachineBasicBlock &MBB = *Root.getParent();
4839   bool Found = false;
4840 
4841   auto Match = [&](int Opcode, int Operand,
4842                    MachineCombinerPattern Pattern) -> bool {
4843     if (canCombineWithFMUL(MBB, Root.getOperand(Operand), Opcode)) {
4844       Patterns.push_back(Pattern);
4845       return true;
4846     }
4847     return false;
4848   };
4849 
4850   typedef MachineCombinerPattern MCP;
4851 
4852   switch (Root.getOpcode()) {
4853   default:
4854     assert(false && "Unsupported FP instruction in combiner\n");
4855     break;
4856   case AArch64::FADDHrr:
4857     assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
4858            "FADDHrr does not have register operands");
4859 
4860     Found  = Match(AArch64::FMULHrr, 1, MCP::FMULADDH_OP1);
4861     Found |= Match(AArch64::FMULHrr, 2, MCP::FMULADDH_OP2);
4862     break;
4863   case AArch64::FADDSrr:
4864     assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
4865            "FADDSrr does not have register operands");
4866 
4867     Found |= Match(AArch64::FMULSrr, 1, MCP::FMULADDS_OP1) ||
4868              Match(AArch64::FMULv1i32_indexed, 1, MCP::FMLAv1i32_indexed_OP1);
4869 
4870     Found |= Match(AArch64::FMULSrr, 2, MCP::FMULADDS_OP2) ||
4871              Match(AArch64::FMULv1i32_indexed, 2, MCP::FMLAv1i32_indexed_OP2);
4872     break;
4873   case AArch64::FADDDrr:
4874     Found |= Match(AArch64::FMULDrr, 1, MCP::FMULADDD_OP1) ||
4875              Match(AArch64::FMULv1i64_indexed, 1, MCP::FMLAv1i64_indexed_OP1);
4876 
4877     Found |= Match(AArch64::FMULDrr, 2, MCP::FMULADDD_OP2) ||
4878              Match(AArch64::FMULv1i64_indexed, 2, MCP::FMLAv1i64_indexed_OP2);
4879     break;
4880   case AArch64::FADDv4f16:
4881     Found |= Match(AArch64::FMULv4i16_indexed, 1, MCP::FMLAv4i16_indexed_OP1) ||
4882              Match(AArch64::FMULv4f16, 1, MCP::FMLAv4f16_OP1);
4883 
4884     Found |= Match(AArch64::FMULv4i16_indexed, 2, MCP::FMLAv4i16_indexed_OP2) ||
4885              Match(AArch64::FMULv4f16, 2, MCP::FMLAv4f16_OP2);
4886     break;
4887   case AArch64::FADDv8f16:
4888     Found |= Match(AArch64::FMULv8i16_indexed, 1, MCP::FMLAv8i16_indexed_OP1) ||
4889              Match(AArch64::FMULv8f16, 1, MCP::FMLAv8f16_OP1);
4890 
4891     Found |= Match(AArch64::FMULv8i16_indexed, 2, MCP::FMLAv8i16_indexed_OP2) ||
4892              Match(AArch64::FMULv8f16, 2, MCP::FMLAv8f16_OP2);
4893     break;
4894   case AArch64::FADDv2f32:
4895     Found |= Match(AArch64::FMULv2i32_indexed, 1, MCP::FMLAv2i32_indexed_OP1) ||
4896              Match(AArch64::FMULv2f32, 1, MCP::FMLAv2f32_OP1);
4897 
4898     Found |= Match(AArch64::FMULv2i32_indexed, 2, MCP::FMLAv2i32_indexed_OP2) ||
4899              Match(AArch64::FMULv2f32, 2, MCP::FMLAv2f32_OP2);
4900     break;
4901   case AArch64::FADDv2f64:
4902     Found |= Match(AArch64::FMULv2i64_indexed, 1, MCP::FMLAv2i64_indexed_OP1) ||
4903              Match(AArch64::FMULv2f64, 1, MCP::FMLAv2f64_OP1);
4904 
4905     Found |= Match(AArch64::FMULv2i64_indexed, 2, MCP::FMLAv2i64_indexed_OP2) ||
4906              Match(AArch64::FMULv2f64, 2, MCP::FMLAv2f64_OP2);
4907     break;
4908   case AArch64::FADDv4f32:
4909     Found |= Match(AArch64::FMULv4i32_indexed, 1, MCP::FMLAv4i32_indexed_OP1) ||
4910              Match(AArch64::FMULv4f32, 1, MCP::FMLAv4f32_OP1);
4911 
4912     Found |= Match(AArch64::FMULv4i32_indexed, 2, MCP::FMLAv4i32_indexed_OP2) ||
4913              Match(AArch64::FMULv4f32, 2, MCP::FMLAv4f32_OP2);
4914     break;
4915   case AArch64::FSUBHrr:
4916     Found  = Match(AArch64::FMULHrr, 1, MCP::FMULSUBH_OP1);
4917     Found |= Match(AArch64::FMULHrr, 2, MCP::FMULSUBH_OP2);
4918     Found |= Match(AArch64::FNMULHrr, 1, MCP::FNMULSUBH_OP1);
4919     break;
4920   case AArch64::FSUBSrr:
4921     Found = Match(AArch64::FMULSrr, 1, MCP::FMULSUBS_OP1);
4922 
4923     Found |= Match(AArch64::FMULSrr, 2, MCP::FMULSUBS_OP2) ||
4924              Match(AArch64::FMULv1i32_indexed, 2, MCP::FMLSv1i32_indexed_OP2);
4925 
4926     Found |= Match(AArch64::FNMULSrr, 1, MCP::FNMULSUBS_OP1);
4927     break;
4928   case AArch64::FSUBDrr:
4929     Found = Match(AArch64::FMULDrr, 1, MCP::FMULSUBD_OP1);
4930 
4931     Found |= Match(AArch64::FMULDrr, 2, MCP::FMULSUBD_OP2) ||
4932              Match(AArch64::FMULv1i64_indexed, 2, MCP::FMLSv1i64_indexed_OP2);
4933 
4934     Found |= Match(AArch64::FNMULDrr, 1, MCP::FNMULSUBD_OP1);
4935     break;
4936   case AArch64::FSUBv4f16:
4937     Found |= Match(AArch64::FMULv4i16_indexed, 2, MCP::FMLSv4i16_indexed_OP2) ||
4938              Match(AArch64::FMULv4f16, 2, MCP::FMLSv4f16_OP2);
4939 
4940     Found |= Match(AArch64::FMULv4i16_indexed, 1, MCP::FMLSv4i16_indexed_OP1) ||
4941              Match(AArch64::FMULv4f16, 1, MCP::FMLSv4f16_OP1);
4942     break;
4943   case AArch64::FSUBv8f16:
4944     Found |= Match(AArch64::FMULv8i16_indexed, 2, MCP::FMLSv8i16_indexed_OP2) ||
4945              Match(AArch64::FMULv8f16, 2, MCP::FMLSv8f16_OP2);
4946 
4947     Found |= Match(AArch64::FMULv8i16_indexed, 1, MCP::FMLSv8i16_indexed_OP1) ||
4948              Match(AArch64::FMULv8f16, 1, MCP::FMLSv8f16_OP1);
4949     break;
4950   case AArch64::FSUBv2f32:
4951     Found |= Match(AArch64::FMULv2i32_indexed, 2, MCP::FMLSv2i32_indexed_OP2) ||
4952              Match(AArch64::FMULv2f32, 2, MCP::FMLSv2f32_OP2);
4953 
4954     Found |= Match(AArch64::FMULv2i32_indexed, 1, MCP::FMLSv2i32_indexed_OP1) ||
4955              Match(AArch64::FMULv2f32, 1, MCP::FMLSv2f32_OP1);
4956     break;
4957   case AArch64::FSUBv2f64:
4958     Found |= Match(AArch64::FMULv2i64_indexed, 2, MCP::FMLSv2i64_indexed_OP2) ||
4959              Match(AArch64::FMULv2f64, 2, MCP::FMLSv2f64_OP2);
4960 
4961     Found |= Match(AArch64::FMULv2i64_indexed, 1, MCP::FMLSv2i64_indexed_OP1) ||
4962              Match(AArch64::FMULv2f64, 1, MCP::FMLSv2f64_OP1);
4963     break;
4964   case AArch64::FSUBv4f32:
4965     Found |= Match(AArch64::FMULv4i32_indexed, 2, MCP::FMLSv4i32_indexed_OP2) ||
4966              Match(AArch64::FMULv4f32, 2, MCP::FMLSv4f32_OP2);
4967 
4968     Found |= Match(AArch64::FMULv4i32_indexed, 1, MCP::FMLSv4i32_indexed_OP1) ||
4969              Match(AArch64::FMULv4f32, 1, MCP::FMLSv4f32_OP1);
4970     break;
4971   }
4972   return Found;
4973 }
4974 
4975 static bool getFMULPatterns(MachineInstr &Root,
4976                             SmallVectorImpl<MachineCombinerPattern> &Patterns) {
4977   MachineBasicBlock &MBB = *Root.getParent();
4978   bool Found = false;
4979 
4980   auto Match = [&](unsigned Opcode, int Operand,
4981                    MachineCombinerPattern Pattern) -> bool {
4982     MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4983     MachineOperand &MO = Root.getOperand(Operand);
4984     MachineInstr *MI = nullptr;
4985     if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
4986       MI = MRI.getUniqueVRegDef(MO.getReg());
4987     if (MI && MI->getOpcode() == Opcode) {
4988       Patterns.push_back(Pattern);
4989       return true;
4990     }
4991     return false;
4992   };
4993 
4994   typedef MachineCombinerPattern MCP;
4995 
4996   switch (Root.getOpcode()) {
4997   default:
4998     return false;
4999   case AArch64::FMULv2f32:
5000     Found = Match(AArch64::DUPv2i32lane, 1, MCP::FMULv2i32_indexed_OP1);
5001     Found |= Match(AArch64::DUPv2i32lane, 2, MCP::FMULv2i32_indexed_OP2);
5002     break;
5003   case AArch64::FMULv2f64:
5004     Found = Match(AArch64::DUPv2i64lane, 1, MCP::FMULv2i64_indexed_OP1);
5005     Found |= Match(AArch64::DUPv2i64lane, 2, MCP::FMULv2i64_indexed_OP2);
5006     break;
5007   case AArch64::FMULv4f16:
5008     Found = Match(AArch64::DUPv4i16lane, 1, MCP::FMULv4i16_indexed_OP1);
5009     Found |= Match(AArch64::DUPv4i16lane, 2, MCP::FMULv4i16_indexed_OP2);
5010     break;
5011   case AArch64::FMULv4f32:
5012     Found = Match(AArch64::DUPv4i32lane, 1, MCP::FMULv4i32_indexed_OP1);
5013     Found |= Match(AArch64::DUPv4i32lane, 2, MCP::FMULv4i32_indexed_OP2);
5014     break;
5015   case AArch64::FMULv8f16:
5016     Found = Match(AArch64::DUPv8i16lane, 1, MCP::FMULv8i16_indexed_OP1);
5017     Found |= Match(AArch64::DUPv8i16lane, 2, MCP::FMULv8i16_indexed_OP2);
5018     break;
5019   }
5020 
5021   return Found;
5022 }
5023 
5024 /// Return true when a code sequence can improve throughput. It
5025 /// should be called only for instructions in loops.
5026 /// \param Pattern - combiner pattern
5027 bool AArch64InstrInfo::isThroughputPattern(
5028     MachineCombinerPattern Pattern) const {
5029   switch (Pattern) {
5030   default:
5031     break;
5032   case MachineCombinerPattern::FMULADDH_OP1:
5033   case MachineCombinerPattern::FMULADDH_OP2:
5034   case MachineCombinerPattern::FMULSUBH_OP1:
5035   case MachineCombinerPattern::FMULSUBH_OP2:
5036   case MachineCombinerPattern::FMULADDS_OP1:
5037   case MachineCombinerPattern::FMULADDS_OP2:
5038   case MachineCombinerPattern::FMULSUBS_OP1:
5039   case MachineCombinerPattern::FMULSUBS_OP2:
5040   case MachineCombinerPattern::FMULADDD_OP1:
5041   case MachineCombinerPattern::FMULADDD_OP2:
5042   case MachineCombinerPattern::FMULSUBD_OP1:
5043   case MachineCombinerPattern::FMULSUBD_OP2:
5044   case MachineCombinerPattern::FNMULSUBH_OP1:
5045   case MachineCombinerPattern::FNMULSUBS_OP1:
5046   case MachineCombinerPattern::FNMULSUBD_OP1:
5047   case MachineCombinerPattern::FMLAv4i16_indexed_OP1:
5048   case MachineCombinerPattern::FMLAv4i16_indexed_OP2:
5049   case MachineCombinerPattern::FMLAv8i16_indexed_OP1:
5050   case MachineCombinerPattern::FMLAv8i16_indexed_OP2:
5051   case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
5052   case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
5053   case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
5054   case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
5055   case MachineCombinerPattern::FMLAv4f16_OP2:
5056   case MachineCombinerPattern::FMLAv4f16_OP1:
5057   case MachineCombinerPattern::FMLAv8f16_OP1:
5058   case MachineCombinerPattern::FMLAv8f16_OP2:
5059   case MachineCombinerPattern::FMLAv2f32_OP2:
5060   case MachineCombinerPattern::FMLAv2f32_OP1:
5061   case MachineCombinerPattern::FMLAv2f64_OP1:
5062   case MachineCombinerPattern::FMLAv2f64_OP2:
5063   case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
5064   case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
5065   case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
5066   case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
5067   case MachineCombinerPattern::FMLAv4f32_OP1:
5068   case MachineCombinerPattern::FMLAv4f32_OP2:
5069   case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
5070   case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
5071   case MachineCombinerPattern::FMLSv4i16_indexed_OP1:
5072   case MachineCombinerPattern::FMLSv4i16_indexed_OP2:
5073   case MachineCombinerPattern::FMLSv8i16_indexed_OP1:
5074   case MachineCombinerPattern::FMLSv8i16_indexed_OP2:
5075   case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
5076   case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
5077   case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
5078   case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
5079   case MachineCombinerPattern::FMLSv4f16_OP1:
5080   case MachineCombinerPattern::FMLSv4f16_OP2:
5081   case MachineCombinerPattern::FMLSv8f16_OP1:
5082   case MachineCombinerPattern::FMLSv8f16_OP2:
5083   case MachineCombinerPattern::FMLSv2f32_OP2:
5084   case MachineCombinerPattern::FMLSv2f64_OP2:
5085   case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
5086   case MachineCombinerPattern::FMLSv4f32_OP2:
5087   case MachineCombinerPattern::FMULv2i32_indexed_OP1:
5088   case MachineCombinerPattern::FMULv2i32_indexed_OP2:
5089   case MachineCombinerPattern::FMULv2i64_indexed_OP1:
5090   case MachineCombinerPattern::FMULv2i64_indexed_OP2:
5091   case MachineCombinerPattern::FMULv4i16_indexed_OP1:
5092   case MachineCombinerPattern::FMULv4i16_indexed_OP2:
5093   case MachineCombinerPattern::FMULv4i32_indexed_OP1:
5094   case MachineCombinerPattern::FMULv4i32_indexed_OP2:
5095   case MachineCombinerPattern::FMULv8i16_indexed_OP1:
5096   case MachineCombinerPattern::FMULv8i16_indexed_OP2:
5097   case MachineCombinerPattern::MULADDv8i8_OP1:
5098   case MachineCombinerPattern::MULADDv8i8_OP2:
5099   case MachineCombinerPattern::MULADDv16i8_OP1:
5100   case MachineCombinerPattern::MULADDv16i8_OP2:
5101   case MachineCombinerPattern::MULADDv4i16_OP1:
5102   case MachineCombinerPattern::MULADDv4i16_OP2:
5103   case MachineCombinerPattern::MULADDv8i16_OP1:
5104   case MachineCombinerPattern::MULADDv8i16_OP2:
5105   case MachineCombinerPattern::MULADDv2i32_OP1:
5106   case MachineCombinerPattern::MULADDv2i32_OP2:
5107   case MachineCombinerPattern::MULADDv4i32_OP1:
5108   case MachineCombinerPattern::MULADDv4i32_OP2:
5109   case MachineCombinerPattern::MULSUBv8i8_OP1:
5110   case MachineCombinerPattern::MULSUBv8i8_OP2:
5111   case MachineCombinerPattern::MULSUBv16i8_OP1:
5112   case MachineCombinerPattern::MULSUBv16i8_OP2:
5113   case MachineCombinerPattern::MULSUBv4i16_OP1:
5114   case MachineCombinerPattern::MULSUBv4i16_OP2:
5115   case MachineCombinerPattern::MULSUBv8i16_OP1:
5116   case MachineCombinerPattern::MULSUBv8i16_OP2:
5117   case MachineCombinerPattern::MULSUBv2i32_OP1:
5118   case MachineCombinerPattern::MULSUBv2i32_OP2:
5119   case MachineCombinerPattern::MULSUBv4i32_OP1:
5120   case MachineCombinerPattern::MULSUBv4i32_OP2:
5121   case MachineCombinerPattern::MULADDv4i16_indexed_OP1:
5122   case MachineCombinerPattern::MULADDv4i16_indexed_OP2:
5123   case MachineCombinerPattern::MULADDv8i16_indexed_OP1:
5124   case MachineCombinerPattern::MULADDv8i16_indexed_OP2:
5125   case MachineCombinerPattern::MULADDv2i32_indexed_OP1:
5126   case MachineCombinerPattern::MULADDv2i32_indexed_OP2:
5127   case MachineCombinerPattern::MULADDv4i32_indexed_OP1:
5128   case MachineCombinerPattern::MULADDv4i32_indexed_OP2:
5129   case MachineCombinerPattern::MULSUBv4i16_indexed_OP1:
5130   case MachineCombinerPattern::MULSUBv4i16_indexed_OP2:
5131   case MachineCombinerPattern::MULSUBv8i16_indexed_OP1:
5132   case MachineCombinerPattern::MULSUBv8i16_indexed_OP2:
5133   case MachineCombinerPattern::MULSUBv2i32_indexed_OP1:
5134   case MachineCombinerPattern::MULSUBv2i32_indexed_OP2:
5135   case MachineCombinerPattern::MULSUBv4i32_indexed_OP1:
5136   case MachineCombinerPattern::MULSUBv4i32_indexed_OP2:
5137     return true;
5138   } // end switch (Pattern)
5139   return false;
5140 }
5141 /// Return true when there is potentially a faster code sequence for an
5142 /// instruction chain ending in \p Root. All potential patterns are listed in
5143 /// the \p Patterns vector. Patterns should be sorted in priority order since
5144 /// the pattern evaluator stops checking as soon as it finds a faster sequence.
5146 bool AArch64InstrInfo::getMachineCombinerPatterns(
5147     MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
5148     bool DoRegPressureReduce) const {
5149   // Integer patterns
5150   if (getMaddPatterns(Root, Patterns))
5151     return true;
5152   // Floating point patterns
5153   if (getFMULPatterns(Root, Patterns))
5154     return true;
5155   if (getFMAPatterns(Root, Patterns))
5156     return true;
5157 
5158   return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
5159                                                      DoRegPressureReduce);
5160 }
5161 
5162 enum class FMAInstKind { Default, Indexed, Accumulator };
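// Default is the scalar 3-source form (e.g. FMADD); Indexed takes the
// multiplier from a vector lane and so emits an extra lane immediate;
// Accumulator is the destructive MLA/MLS-style form whose first source is
// the addend.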
5163 /// genFusedMultiply - Generate fused multiply instructions.
5164 /// This function supports both integer and floating point instructions.
5165 /// A typical example:
5166 ///  F|MUL I=A,B,0
5167 ///  F|ADD R,I,C
5168 ///  ==> F|MADD R,A,B,C
5169 /// \param MF Containing MachineFunction
5170 /// \param MRI Register information
5171 /// \param TII Target information
5172 /// \param Root is the F|ADD instruction
5173 /// \param [out] InsInstrs is a vector of machine instructions and will
5174 /// contain the generated madd instruction
5175 /// \param IdxMulOpd is index of operand in Root that is the result of
5176 /// the F|MUL. In the example above IdxMulOpd is 1.
5177 /// \param MaddOpc the opcode of the f|madd instruction
5178 /// \param RC Register class of operands
5179 /// \param kind the kind of fma instruction (addressing mode) to be generated
5180 /// \param ReplacedAddend is the result register from the instruction
5181 /// replacing the non-combined operand, if any.
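/// Concrete example for the default kind:
///   %3 = FMULSrr %1, %2
///   %4 = FADDSrr %3, %0
/// with IdxMulOpd = 1 emits %4 = FMADDSrrr %1, %2, %0 (= %0 + %1 * %2).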
5182 static MachineInstr *
5183 genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
5184                  const TargetInstrInfo *TII, MachineInstr &Root,
5185                  SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
5186                  unsigned MaddOpc, const TargetRegisterClass *RC,
5187                  FMAInstKind kind = FMAInstKind::Default,
5188                  const Register *ReplacedAddend = nullptr) {
5189   assert(IdxMulOpd == 1 || IdxMulOpd == 2);
5190 
5191   unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
5192   MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
5193   Register ResultReg = Root.getOperand(0).getReg();
5194   Register SrcReg0 = MUL->getOperand(1).getReg();
5195   bool Src0IsKill = MUL->getOperand(1).isKill();
5196   Register SrcReg1 = MUL->getOperand(2).getReg();
5197   bool Src1IsKill = MUL->getOperand(2).isKill();
5198 
5199   unsigned SrcReg2;
5200   bool Src2IsKill;
5201   if (ReplacedAddend) {
5202     // If we just generated a new addend, we must be its only use.
5203     SrcReg2 = *ReplacedAddend;
5204     Src2IsKill = true;
5205   } else {
5206     SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
5207     Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
5208   }
5209 
5210   if (Register::isVirtualRegister(ResultReg))
5211     MRI.constrainRegClass(ResultReg, RC);
5212   if (Register::isVirtualRegister(SrcReg0))
5213     MRI.constrainRegClass(SrcReg0, RC);
5214   if (Register::isVirtualRegister(SrcReg1))
5215     MRI.constrainRegClass(SrcReg1, RC);
5216   if (Register::isVirtualRegister(SrcReg2))
5217     MRI.constrainRegClass(SrcReg2, RC);
5218 
5219   MachineInstrBuilder MIB;
5220   if (kind == FMAInstKind::Default)
5221     MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
5222               .addReg(SrcReg0, getKillRegState(Src0IsKill))
5223               .addReg(SrcReg1, getKillRegState(Src1IsKill))
5224               .addReg(SrcReg2, getKillRegState(Src2IsKill));
5225   else if (kind == FMAInstKind::Indexed)
5226     MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
5227               .addReg(SrcReg2, getKillRegState(Src2IsKill))
5228               .addReg(SrcReg0, getKillRegState(Src0IsKill))
5229               .addReg(SrcReg1, getKillRegState(Src1IsKill))
5230               .addImm(MUL->getOperand(3).getImm());
5231   else if (kind == FMAInstKind::Accumulator)
5232     MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
5233               .addReg(SrcReg2, getKillRegState(Src2IsKill))
5234               .addReg(SrcReg0, getKillRegState(Src0IsKill))
5235               .addReg(SrcReg1, getKillRegState(Src1IsKill));
5236   else
5237     assert(false && "Invalid FMA instruction kind \n");
5238   // Insert the MADD (MADD, FMA, FMS, FMLA, FMSL)
5239   InsInstrs.push_back(MIB);
5240   return MUL;
5241 }
5242 
5243 /// Fold (FMUL x (DUP y lane)) into (FMUL_indexed x y lane)
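/// e.g. %2 = DUPv2i32lane %1, 1 followed by %3 = FMULv2f32 %0, %2 is
/// rewritten as %3 = FMULv2i32_indexed %0, %1, 1.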
5244 static MachineInstr *
5245 genIndexedMultiply(MachineInstr &Root,
5246                    SmallVectorImpl<MachineInstr *> &InsInstrs,
5247                    unsigned IdxDupOp, unsigned MulOpc,
5248                    const TargetRegisterClass *RC, MachineRegisterInfo &MRI) {
5249   assert(((IdxDupOp == 1) || (IdxDupOp == 2)) &&
5250          "Invalid index of FMUL operand");
5251 
5252   MachineFunction &MF = *Root.getMF();
5253   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
5254 
5255   MachineInstr *Dup =
5256       MF.getRegInfo().getUniqueVRegDef(Root.getOperand(IdxDupOp).getReg());
5257 
5258   Register DupSrcReg = Dup->getOperand(1).getReg();
5259   MRI.clearKillFlags(DupSrcReg);
5260   MRI.constrainRegClass(DupSrcReg, RC);
5261 
5262   unsigned DupSrcLane = Dup->getOperand(2).getImm();
5263 
5264   unsigned IdxMulOp = IdxDupOp == 1 ? 2 : 1;
5265   MachineOperand &MulOp = Root.getOperand(IdxMulOp);
5266 
5267   Register ResultReg = Root.getOperand(0).getReg();
5268 
5269   MachineInstrBuilder MIB;
5270   MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MulOpc), ResultReg)
5271             .add(MulOp)
5272             .addReg(DupSrcReg)
5273             .addImm(DupSrcLane);
5274 
5275   InsInstrs.push_back(MIB);
5276   return &Root;
5277 }
5278 
5279 /// genFusedMultiplyAcc - Helper to generate fused multiply accumulate
5280 /// instructions.
5281 ///
5282 /// \see genFusedMultiply
5283 static MachineInstr *genFusedMultiplyAcc(
5284     MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
5285     MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
5286     unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC) {
5287   return genFusedMultiply(MF, MRI, TII, Root, InsInstrs, IdxMulOpd, MaddOpc, RC,
5288                           FMAInstKind::Accumulator);
5289 }
5290 
5291 /// genNeg - Helper to generate an intermediate negation of the second operand
5292 /// of Root.
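/// This enables the MULSUB*_OP1 patterns: A*B - C is rewritten as
/// NEG V, C followed by MLA V, A, B, i.e. V = -C + A*B.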
5293 static Register genNeg(MachineFunction &MF, MachineRegisterInfo &MRI,
5294                        const TargetInstrInfo *TII, MachineInstr &Root,
5295                        SmallVectorImpl<MachineInstr *> &InsInstrs,
5296                        DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
5297                        unsigned MnegOpc, const TargetRegisterClass *RC) {
5298   Register NewVR = MRI.createVirtualRegister(RC);
5299   MachineInstrBuilder MIB =
5300       BuildMI(MF, Root.getDebugLoc(), TII->get(MnegOpc), NewVR)
5301           .add(Root.getOperand(2));
5302   InsInstrs.push_back(MIB);
5303 
5304   assert(InstrIdxForVirtReg.empty());
5305   InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
5306 
5307   return NewVR;
5308 }
5309 
5310 /// genFusedMultiplyAccNeg - Helper to generate fused multiply accumulate
5311 /// instructions with an additional negation of the accumulator
5312 static MachineInstr *genFusedMultiplyAccNeg(
5313     MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
5314     MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
5315     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg, unsigned IdxMulOpd,
5316     unsigned MaddOpc, unsigned MnegOpc, const TargetRegisterClass *RC) {
5317   assert(IdxMulOpd == 1);
5318 
5319   Register NewVR =
5320       genNeg(MF, MRI, TII, Root, InsInstrs, InstrIdxForVirtReg, MnegOpc, RC);
5321   return genFusedMultiply(MF, MRI, TII, Root, InsInstrs, IdxMulOpd, MaddOpc, RC,
5322                           FMAInstKind::Accumulator, &NewVR);
5323 }
5324 
5325 /// genFusedMultiplyIdx - Helper to generate fused multiply accumulate
5326 /// instructions where the multiplier is taken from a vector lane.
5327 ///
5328 /// \see genFusedMultiply
5329 static MachineInstr *genFusedMultiplyIdx(
5330     MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
5331     MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
5332     unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC) {
5333   return genFusedMultiply(MF, MRI, TII, Root, InsInstrs, IdxMulOpd, MaddOpc, RC,
5334                           FMAInstKind::Indexed);
5335 }
5336 
5337 /// genFusedMultiplyIdxNeg - Helper to generate indexed fused multiply
5338 /// accumulate instructions with an additional negation of the accumulator.
5339 static MachineInstr *genFusedMultiplyIdxNeg(
5340     MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
5341     MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
5342     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg, unsigned IdxMulOpd,
5343     unsigned MaddOpc, unsigned MnegOpc, const TargetRegisterClass *RC) {
5344   assert(IdxMulOpd == 1);
5345 
5346   Register NewVR =
5347       genNeg(MF, MRI, TII, Root, InsInstrs, InstrIdxForVirtReg, MnegOpc, RC);
5348 
5349   return genFusedMultiply(MF, MRI, TII, Root, InsInstrs, IdxMulOpd, MaddOpc, RC,
5350                           FMAInstKind::Indexed, &NewVR);
5351 }
5352 
5353 /// genMaddR - Generate madd instruction and combine mul and add using
5354 /// an extra virtual register
5355 /// Example - an ADD intermediate needs to be stored in a register:
5356 ///   MUL I=A,B,0
5357 ///   ADD R,I,Imm
5358 ///   ==> ORR  V, ZR, Imm
5359 ///   ==> MADD R,A,B,V
5360 /// \param MF Containing MachineFunction
5361 /// \param MRI Register information
5362 /// \param TII Target information
5363 /// \param Root is the ADD instruction
5364 /// \param [out] InsInstrs is a vector of machine instructions and will
5365 /// contain the generated madd instruction
5366 /// \param IdxMulOpd is index of operand in Root that is the result of
5367 /// the MUL. In the example above IdxMulOpd is 1.
5368 /// \param MaddOpc the opcode of the madd instruction
5369 /// \param VR is a virtual register that holds the value of an ADD operand
5370 /// (V in the example above).
5371 /// \param RC Register class of operands
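/// Concretely, for "%4 = ADDWri %3, 7, 0" with %3 = MUL %1, %2, the caller
/// first materializes %5 = ORRWri $wzr, <logical-immediate encoding of 7>
/// and then this emits %4 = MADDWrrr %1, %2, %5.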
5372 static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
5373                               const TargetInstrInfo *TII, MachineInstr &Root,
5374                               SmallVectorImpl<MachineInstr *> &InsInstrs,
5375                               unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR,
5376                               const TargetRegisterClass *RC) {
5377   assert(IdxMulOpd == 1 || IdxMulOpd == 2);
5378 
5379   MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
5380   Register ResultReg = Root.getOperand(0).getReg();
5381   Register SrcReg0 = MUL->getOperand(1).getReg();
5382   bool Src0IsKill = MUL->getOperand(1).isKill();
5383   Register SrcReg1 = MUL->getOperand(2).getReg();
5384   bool Src1IsKill = MUL->getOperand(2).isKill();
5385 
5386   if (Register::isVirtualRegister(ResultReg))
5387     MRI.constrainRegClass(ResultReg, RC);
5388   if (Register::isVirtualRegister(SrcReg0))
5389     MRI.constrainRegClass(SrcReg0, RC);
5390   if (Register::isVirtualRegister(SrcReg1))
5391     MRI.constrainRegClass(SrcReg1, RC);
5392   if (Register::isVirtualRegister(VR))
5393     MRI.constrainRegClass(VR, RC);
5394 
5395   MachineInstrBuilder MIB =
5396       BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
5397           .addReg(SrcReg0, getKillRegState(Src0IsKill))
5398           .addReg(SrcReg1, getKillRegState(Src1IsKill))
5399           .addReg(VR);
5400   // Insert the MADD
5401   InsInstrs.push_back(MIB);
5402   return MUL;
5403 }
5404 
5405 /// When getMachineCombinerPatterns() finds potential patterns,
5406 /// this function generates the instructions that could replace the
5407 /// original code sequence
5408 void AArch64InstrInfo::genAlternativeCodeSequence(
5409     MachineInstr &Root, MachineCombinerPattern Pattern,
5410     SmallVectorImpl<MachineInstr *> &InsInstrs,
5411     SmallVectorImpl<MachineInstr *> &DelInstrs,
5412     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
5413   MachineBasicBlock &MBB = *Root.getParent();
5414   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5415   MachineFunction &MF = *MBB.getParent();
5416   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
5417 
5418   MachineInstr *MUL = nullptr;
5419   const TargetRegisterClass *RC;
5420   unsigned Opc;
5421   switch (Pattern) {
5422   default:
5423     // Reassociate instructions.
5424     TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
5425                                                 DelInstrs, InstrIdxForVirtReg);
5426     return;
5427   case MachineCombinerPattern::MULADDW_OP1:
5428   case MachineCombinerPattern::MULADDX_OP1:
5429     // MUL I=A,B,0
5430     // ADD R,I,C
5431     // ==> MADD R,A,B,C
5432     // --- Create(MADD);
5433     if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
5434       Opc = AArch64::MADDWrrr;
5435       RC = &AArch64::GPR32RegClass;
5436     } else {
5437       Opc = AArch64::MADDXrrr;
5438       RC = &AArch64::GPR64RegClass;
5439     }
5440     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5441     break;
5442   case MachineCombinerPattern::MULADDW_OP2:
5443   case MachineCombinerPattern::MULADDX_OP2:
5444     // MUL I=A,B,0
5445     // ADD R,C,I
5446     // ==> MADD R,A,B,C
5447     // --- Create(MADD);
5448     if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
5449       Opc = AArch64::MADDWrrr;
5450       RC = &AArch64::GPR32RegClass;
5451     } else {
5452       Opc = AArch64::MADDXrrr;
5453       RC = &AArch64::GPR64RegClass;
5454     }
5455     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5456     break;
5457   case MachineCombinerPattern::MULADDWI_OP1:
5458   case MachineCombinerPattern::MULADDXI_OP1: {
5459     // MUL I=A,B,0
5460     // ADD R,I,Imm
5461     // ==> ORR  V, ZR, Imm
5462     // ==> MADD R,A,B,V
5463     // --- Create(MADD);
5464     const TargetRegisterClass *OrrRC;
5465     unsigned BitSize, OrrOpc, ZeroReg;
5466     if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
5467       OrrOpc = AArch64::ORRWri;
5468       OrrRC = &AArch64::GPR32spRegClass;
5469       BitSize = 32;
5470       ZeroReg = AArch64::WZR;
5471       Opc = AArch64::MADDWrrr;
5472       RC = &AArch64::GPR32RegClass;
5473     } else {
5474       OrrOpc = AArch64::ORRXri;
5475       OrrRC = &AArch64::GPR64spRegClass;
5476       BitSize = 64;
5477       ZeroReg = AArch64::XZR;
5478       Opc = AArch64::MADDXrrr;
5479       RC = &AArch64::GPR64RegClass;
5480     }
5481     Register NewVR = MRI.createVirtualRegister(OrrRC);
5482     uint64_t Imm = Root.getOperand(2).getImm();
5483 
5484     if (Root.getOperand(3).isImm()) {
5485       unsigned Val = Root.getOperand(3).getImm();
5486       Imm = Imm << Val;
5487     }
5488     uint64_t UImm = SignExtend64(Imm, BitSize);
5489     uint64_t Encoding;
5490     if (!AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding))
5491       return;
5492     MachineInstrBuilder MIB1 =
5493         BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
5494             .addReg(ZeroReg)
5495             .addImm(Encoding);
5496     InsInstrs.push_back(MIB1);
5497     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
5498     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
5499     break;
5500   }
5501   case MachineCombinerPattern::MULSUBW_OP1:
5502   case MachineCombinerPattern::MULSUBX_OP1: {
5503     // MUL I=A,B,0
5504     // SUB R,I, C
5505     // ==> SUB  V, 0, C
5506     // ==> MADD R,A,B,V // = -C + A*B
5507     // --- Create(MADD);
5508     const TargetRegisterClass *SubRC;
5509     unsigned SubOpc, ZeroReg;
5510     if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
5511       SubOpc = AArch64::SUBWrr;
5512       SubRC = &AArch64::GPR32spRegClass;
5513       ZeroReg = AArch64::WZR;
5514       Opc = AArch64::MADDWrrr;
5515       RC = &AArch64::GPR32RegClass;
5516     } else {
5517       SubOpc = AArch64::SUBXrr;
5518       SubRC = &AArch64::GPR64spRegClass;
5519       ZeroReg = AArch64::XZR;
5520       Opc = AArch64::MADDXrrr;
5521       RC = &AArch64::GPR64RegClass;
5522     }
5523     Register NewVR = MRI.createVirtualRegister(SubRC);
5524     // SUB NewVR, 0, C
5525     MachineInstrBuilder MIB1 =
5526         BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
5527             .addReg(ZeroReg)
5528             .add(Root.getOperand(2));
5529     InsInstrs.push_back(MIB1);
5530     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
5531     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
5532     break;
5533   }
5534   case MachineCombinerPattern::MULSUBW_OP2:
5535   case MachineCombinerPattern::MULSUBX_OP2:
5536     // MUL I=A,B,0
5537     // SUB R,C,I
5538     // ==> MSUB R,A,B,C (computes C - A*B)
5539     // --- Create(MSUB);
5540     if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
5541       Opc = AArch64::MSUBWrrr;
5542       RC = &AArch64::GPR32RegClass;
5543     } else {
5544       Opc = AArch64::MSUBXrrr;
5545       RC = &AArch64::GPR64RegClass;
5546     }
5547     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5548     break;
5549   case MachineCombinerPattern::MULSUBWI_OP1:
5550   case MachineCombinerPattern::MULSUBXI_OP1: {
5551     // MUL I=A,B,0
5552     // SUB R,I, Imm
5553     // ==> ORR  V, ZR, -Imm
5554     // ==> MADD R,A,B,V // = -Imm + A*B
5555     // --- Create(MADD);
5556     const TargetRegisterClass *OrrRC;
5557     unsigned BitSize, OrrOpc, ZeroReg;
5558     if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
5559       OrrOpc = AArch64::ORRWri;
5560       OrrRC = &AArch64::GPR32spRegClass;
5561       BitSize = 32;
5562       ZeroReg = AArch64::WZR;
5563       Opc = AArch64::MADDWrrr;
5564       RC = &AArch64::GPR32RegClass;
5565     } else {
5566       OrrOpc = AArch64::ORRXri;
5567       OrrRC = &AArch64::GPR64spRegClass;
5568       BitSize = 64;
5569       ZeroReg = AArch64::XZR;
5570       Opc = AArch64::MADDXrrr;
5571       RC = &AArch64::GPR64RegClass;
5572     }
5573     Register NewVR = MRI.createVirtualRegister(OrrRC);
5574     uint64_t Imm = Root.getOperand(2).getImm();
5575     if (Root.getOperand(3).isImm()) {
5576       unsigned Val = Root.getOperand(3).getImm();
5577       Imm = Imm << Val;
5578     }
5579     uint64_t UImm = SignExtend64(-Imm, BitSize);
5580     uint64_t Encoding;
5581     if (!AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding))
5582       return;
5583     MachineInstrBuilder MIB1 =
5584         BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
5585             .addReg(ZeroReg)
5586             .addImm(Encoding);
5587     InsInstrs.push_back(MIB1);
5588     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
5589     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
5590     break;
5591   }
5592 
5593   case MachineCombinerPattern::MULADDv8i8_OP1:
5594     Opc = AArch64::MLAv8i8;
5595     RC = &AArch64::FPR64RegClass;
5596     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5597     break;
5598   case MachineCombinerPattern::MULADDv8i8_OP2:
5599     Opc = AArch64::MLAv8i8;
5600     RC = &AArch64::FPR64RegClass;
5601     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5602     break;
5603   case MachineCombinerPattern::MULADDv16i8_OP1:
5604     Opc = AArch64::MLAv16i8;
5605     RC = &AArch64::FPR128RegClass;
5606     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5607     break;
5608   case MachineCombinerPattern::MULADDv16i8_OP2:
5609     Opc = AArch64::MLAv16i8;
5610     RC = &AArch64::FPR128RegClass;
5611     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5612     break;
5613   case MachineCombinerPattern::MULADDv4i16_OP1:
5614     Opc = AArch64::MLAv4i16;
5615     RC = &AArch64::FPR64RegClass;
5616     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5617     break;
5618   case MachineCombinerPattern::MULADDv4i16_OP2:
5619     Opc = AArch64::MLAv4i16;
5620     RC = &AArch64::FPR64RegClass;
5621     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5622     break;
5623   case MachineCombinerPattern::MULADDv8i16_OP1:
5624     Opc = AArch64::MLAv8i16;
5625     RC = &AArch64::FPR128RegClass;
5626     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5627     break;
5628   case MachineCombinerPattern::MULADDv8i16_OP2:
5629     Opc = AArch64::MLAv8i16;
5630     RC = &AArch64::FPR128RegClass;
5631     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5632     break;
5633   case MachineCombinerPattern::MULADDv2i32_OP1:
5634     Opc = AArch64::MLAv2i32;
5635     RC = &AArch64::FPR64RegClass;
5636     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5637     break;
5638   case MachineCombinerPattern::MULADDv2i32_OP2:
5639     Opc = AArch64::MLAv2i32;
5640     RC = &AArch64::FPR64RegClass;
5641     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5642     break;
5643   case MachineCombinerPattern::MULADDv4i32_OP1:
5644     Opc = AArch64::MLAv4i32;
5645     RC = &AArch64::FPR128RegClass;
5646     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5647     break;
5648   case MachineCombinerPattern::MULADDv4i32_OP2:
5649     Opc = AArch64::MLAv4i32;
5650     RC = &AArch64::FPR128RegClass;
5651     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5652     break;
5653 
5654   case MachineCombinerPattern::MULSUBv8i8_OP1:
5655     Opc = AArch64::MLAv8i8;
5656     RC = &AArch64::FPR64RegClass;
5657     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
5658                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv8i8,
5659                                  RC);
5660     break;
5661   case MachineCombinerPattern::MULSUBv8i8_OP2:
5662     Opc = AArch64::MLSv8i8;
5663     RC = &AArch64::FPR64RegClass;
5664     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5665     break;
5666   case MachineCombinerPattern::MULSUBv16i8_OP1:
5667     Opc = AArch64::MLAv16i8;
5668     RC = &AArch64::FPR128RegClass;
5669     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
5670                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv16i8,
5671                                  RC);
5672     break;
5673   case MachineCombinerPattern::MULSUBv16i8_OP2:
5674     Opc = AArch64::MLSv16i8;
5675     RC = &AArch64::FPR128RegClass;
5676     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5677     break;
5678   case MachineCombinerPattern::MULSUBv4i16_OP1:
5679     Opc = AArch64::MLAv4i16;
5680     RC = &AArch64::FPR64RegClass;
5681     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
5682                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv4i16,
5683                                  RC);
5684     break;
5685   case MachineCombinerPattern::MULSUBv4i16_OP2:
5686     Opc = AArch64::MLSv4i16;
5687     RC = &AArch64::FPR64RegClass;
5688     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5689     break;
5690   case MachineCombinerPattern::MULSUBv8i16_OP1:
5691     Opc = AArch64::MLAv8i16;
5692     RC = &AArch64::FPR128RegClass;
5693     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
5694                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv8i16,
5695                                  RC);
5696     break;
5697   case MachineCombinerPattern::MULSUBv8i16_OP2:
5698     Opc = AArch64::MLSv8i16;
5699     RC = &AArch64::FPR128RegClass;
5700     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5701     break;
5702   case MachineCombinerPattern::MULSUBv2i32_OP1:
5703     Opc = AArch64::MLAv2i32;
5704     RC = &AArch64::FPR64RegClass;
5705     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
5706                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv2i32,
5707                                  RC);
5708     break;
5709   case MachineCombinerPattern::MULSUBv2i32_OP2:
5710     Opc = AArch64::MLSv2i32;
5711     RC = &AArch64::FPR64RegClass;
5712     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5713     break;
5714   case MachineCombinerPattern::MULSUBv4i32_OP1:
5715     Opc = AArch64::MLAv4i32;
5716     RC = &AArch64::FPR128RegClass;
5717     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
5718                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv4i32,
5719                                  RC);
5720     break;
5721   case MachineCombinerPattern::MULSUBv4i32_OP2:
5722     Opc = AArch64::MLSv4i32;
5723     RC = &AArch64::FPR128RegClass;
5724     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5725     break;
5726 
5727   case MachineCombinerPattern::MULADDv4i16_indexed_OP1:
5728     Opc = AArch64::MLAv4i16_indexed;
5729     RC = &AArch64::FPR64RegClass;
5730     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5731     break;
5732   case MachineCombinerPattern::MULADDv4i16_indexed_OP2:
5733     Opc = AArch64::MLAv4i16_indexed;
5734     RC = &AArch64::FPR64RegClass;
5735     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5736     break;
5737   case MachineCombinerPattern::MULADDv8i16_indexed_OP1:
5738     Opc = AArch64::MLAv8i16_indexed;
5739     RC = &AArch64::FPR128RegClass;
5740     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5741     break;
5742   case MachineCombinerPattern::MULADDv8i16_indexed_OP2:
5743     Opc = AArch64::MLAv8i16_indexed;
5744     RC = &AArch64::FPR128RegClass;
5745     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5746     break;
5747   case MachineCombinerPattern::MULADDv2i32_indexed_OP1:
5748     Opc = AArch64::MLAv2i32_indexed;
5749     RC = &AArch64::FPR64RegClass;
5750     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5751     break;
5752   case MachineCombinerPattern::MULADDv2i32_indexed_OP2:
5753     Opc = AArch64::MLAv2i32_indexed;
5754     RC = &AArch64::FPR64RegClass;
5755     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5756     break;
5757   case MachineCombinerPattern::MULADDv4i32_indexed_OP1:
5758     Opc = AArch64::MLAv4i32_indexed;
5759     RC = &AArch64::FPR128RegClass;
5760     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5761     break;
5762   case MachineCombinerPattern::MULADDv4i32_indexed_OP2:
5763     Opc = AArch64::MLAv4i32_indexed;
5764     RC = &AArch64::FPR128RegClass;
5765     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5766     break;
5767 
5768   case MachineCombinerPattern::MULSUBv4i16_indexed_OP1:
5769     Opc = AArch64::MLAv4i16_indexed;
5770     RC = &AArch64::FPR64RegClass;
5771     MUL = genFusedMultiplyIdxNeg(MF, MRI, TII, Root, InsInstrs,
5772                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv4i16,
5773                                  RC);
5774     break;
5775   case MachineCombinerPattern::MULSUBv4i16_indexed_OP2:
5776     Opc = AArch64::MLSv4i16_indexed;
5777     RC = &AArch64::FPR64RegClass;
5778     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5779     break;
5780   case MachineCombinerPattern::MULSUBv8i16_indexed_OP1:
5781     Opc = AArch64::MLAv8i16_indexed;
5782     RC = &AArch64::FPR128RegClass;
5783     MUL = genFusedMultiplyIdxNeg(MF, MRI, TII, Root, InsInstrs,
5784                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv8i16,
5785                                  RC);
5786     break;
5787   case MachineCombinerPattern::MULSUBv8i16_indexed_OP2:
5788     Opc = AArch64::MLSv8i16_indexed;
5789     RC = &AArch64::FPR128RegClass;
5790     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5791     break;
5792   case MachineCombinerPattern::MULSUBv2i32_indexed_OP1:
5793     Opc = AArch64::MLAv2i32_indexed;
5794     RC = &AArch64::FPR64RegClass;
5795     MUL = genFusedMultiplyIdxNeg(MF, MRI, TII, Root, InsInstrs,
5796                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv2i32,
5797                                  RC);
5798     break;
5799   case MachineCombinerPattern::MULSUBv2i32_indexed_OP2:
5800     Opc = AArch64::MLSv2i32_indexed;
5801     RC = &AArch64::FPR64RegClass;
5802     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5803     break;
5804   case MachineCombinerPattern::MULSUBv4i32_indexed_OP1:
5805     Opc = AArch64::MLAv4i32_indexed;
5806     RC = &AArch64::FPR128RegClass;
5807     MUL = genFusedMultiplyIdxNeg(MF, MRI, TII, Root, InsInstrs,
5808                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv4i32,
5809                                  RC);
5810     break;
5811   case MachineCombinerPattern::MULSUBv4i32_indexed_OP2:
5812     Opc = AArch64::MLSv4i32_indexed;
5813     RC = &AArch64::FPR128RegClass;
5814     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5815     break;
5816 
5817   // Floating Point Support
5818   case MachineCombinerPattern::FMULADDH_OP1:
5819     Opc = AArch64::FMADDHrrr;
5820     RC = &AArch64::FPR16RegClass;
5821     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5822     break;
5823   case MachineCombinerPattern::FMULADDS_OP1:
5824     Opc = AArch64::FMADDSrrr;
5825     RC = &AArch64::FPR32RegClass;
5826     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5827     break;
5828   case MachineCombinerPattern::FMULADDD_OP1:
5829     Opc = AArch64::FMADDDrrr;
5830     RC = &AArch64::FPR64RegClass;
5831     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5832     break;
5833 
5834   case MachineCombinerPattern::FMULADDH_OP2:
5835     Opc = AArch64::FMADDHrrr;
5836     RC = &AArch64::FPR16RegClass;
5837     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5838     break;
5839   case MachineCombinerPattern::FMULADDS_OP2:
5840     Opc = AArch64::FMADDSrrr;
5841     RC = &AArch64::FPR32RegClass;
5842     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5843     break;
5844   case MachineCombinerPattern::FMULADDD_OP2:
5845     Opc = AArch64::FMADDDrrr;
5846     RC = &AArch64::FPR64RegClass;
5847     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5848     break;
5849 
5850   case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
5851     Opc = AArch64::FMLAv1i32_indexed;
5852     RC = &AArch64::FPR32RegClass;
5853     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5854                            FMAInstKind::Indexed);
5855     break;
5856   case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
5857     Opc = AArch64::FMLAv1i32_indexed;
5858     RC = &AArch64::FPR32RegClass;
5859     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5860                            FMAInstKind::Indexed);
5861     break;
5862 
5863   case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
5864     Opc = AArch64::FMLAv1i64_indexed;
5865     RC = &AArch64::FPR64RegClass;
5866     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5867                            FMAInstKind::Indexed);
5868     break;
5869   case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
5870     Opc = AArch64::FMLAv1i64_indexed;
5871     RC = &AArch64::FPR64RegClass;
5872     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5873                            FMAInstKind::Indexed);
5874     break;
5875 
5876   case MachineCombinerPattern::FMLAv4i16_indexed_OP1:
5877     RC = &AArch64::FPR64RegClass;
5878     Opc = AArch64::FMLAv4i16_indexed;
5879     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5880                            FMAInstKind::Indexed);
5881     break;
5882   case MachineCombinerPattern::FMLAv4f16_OP1:
5883     RC = &AArch64::FPR64RegClass;
5884     Opc = AArch64::FMLAv4f16;
5885     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5886                            FMAInstKind::Accumulator);
5887     break;
5888   case MachineCombinerPattern::FMLAv4i16_indexed_OP2:
5889     RC = &AArch64::FPR64RegClass;
5890     Opc = AArch64::FMLAv4i16_indexed;
5891     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5892                            FMAInstKind::Indexed);
5893     break;
5894   case MachineCombinerPattern::FMLAv4f16_OP2:
5895     RC = &AArch64::FPR64RegClass;
5896     Opc = AArch64::FMLAv4f16;
5897     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5898                            FMAInstKind::Accumulator);
5899     break;
5900 
5901   case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
5902   case MachineCombinerPattern::FMLAv2f32_OP1:
5903     RC = &AArch64::FPR64RegClass;
5904     if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
5905       Opc = AArch64::FMLAv2i32_indexed;
5906       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5907                              FMAInstKind::Indexed);
5908     } else {
5909       Opc = AArch64::FMLAv2f32;
5910       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5911                              FMAInstKind::Accumulator);
5912     }
5913     break;
5914   case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
5915   case MachineCombinerPattern::FMLAv2f32_OP2:
5916     RC = &AArch64::FPR64RegClass;
5917     if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
5918       Opc = AArch64::FMLAv2i32_indexed;
5919       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5920                              FMAInstKind::Indexed);
5921     } else {
5922       Opc = AArch64::FMLAv2f32;
5923       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5924                              FMAInstKind::Accumulator);
5925     }
5926     break;
5927 
5928   case MachineCombinerPattern::FMLAv8i16_indexed_OP1:
5929     RC = &AArch64::FPR128RegClass;
5930     Opc = AArch64::FMLAv8i16_indexed;
5931     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5932                            FMAInstKind::Indexed);
5933     break;
5934   case MachineCombinerPattern::FMLAv8f16_OP1:
5935     RC = &AArch64::FPR128RegClass;
5936     Opc = AArch64::FMLAv8f16;
5937     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5938                            FMAInstKind::Accumulator);
5939     break;
5940   case MachineCombinerPattern::FMLAv8i16_indexed_OP2:
5941     RC = &AArch64::FPR128RegClass;
5942     Opc = AArch64::FMLAv8i16_indexed;
5943     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5944                            FMAInstKind::Indexed);
5945     break;
5946   case MachineCombinerPattern::FMLAv8f16_OP2:
5947     RC = &AArch64::FPR128RegClass;
5948     Opc = AArch64::FMLAv8f16;
5949     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5950                            FMAInstKind::Accumulator);
5951     break;
5952 
5953   case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
5954   case MachineCombinerPattern::FMLAv2f64_OP1:
5955     RC = &AArch64::FPR128RegClass;
5956     if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
5957       Opc = AArch64::FMLAv2i64_indexed;
5958       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5959                              FMAInstKind::Indexed);
5960     } else {
5961       Opc = AArch64::FMLAv2f64;
5962       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5963                              FMAInstKind::Accumulator);
5964     }
5965     break;
5966   case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
5967   case MachineCombinerPattern::FMLAv2f64_OP2:
5968     RC = &AArch64::FPR128RegClass;
5969     if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
5970       Opc = AArch64::FMLAv2i64_indexed;
5971       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5972                              FMAInstKind::Indexed);
5973     } else {
5974       Opc = AArch64::FMLAv2f64;
5975       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
5976                              FMAInstKind::Accumulator);
5977     }
5978     break;
5979 
5980   case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
5981   case MachineCombinerPattern::FMLAv4f32_OP1:
5982     RC = &AArch64::FPR128RegClass;
5983     if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
5984       Opc = AArch64::FMLAv4i32_indexed;
5985       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5986                              FMAInstKind::Indexed);
5987     } else {
5988       Opc = AArch64::FMLAv4f32;
5989       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
5990                              FMAInstKind::Accumulator);
5991     }
5992     break;
5993 
5994   case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
5995   case MachineCombinerPattern::FMLAv4f32_OP2:
5996     RC = &AArch64::FPR128RegClass;
5997     if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
5998       Opc = AArch64::FMLAv4i32_indexed;
5999       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6000                              FMAInstKind::Indexed);
6001     } else {
6002       Opc = AArch64::FMLAv4f32;
6003       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6004                              FMAInstKind::Accumulator);
6005     }
6006     break;
6007 
6008   case MachineCombinerPattern::FMULSUBH_OP1:
6009     Opc = AArch64::FNMSUBHrrr;
6010     RC = &AArch64::FPR16RegClass;
6011     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6012     break;
6013   case MachineCombinerPattern::FMULSUBS_OP1:
6014     Opc = AArch64::FNMSUBSrrr;
6015     RC = &AArch64::FPR32RegClass;
6016     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6017     break;
6018   case MachineCombinerPattern::FMULSUBD_OP1:
6019     Opc = AArch64::FNMSUBDrrr;
6020     RC = &AArch64::FPR64RegClass;
6021     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6022     break;
6023 
6024   case MachineCombinerPattern::FNMULSUBH_OP1:
6025     Opc = AArch64::FNMADDHrrr;
6026     RC = &AArch64::FPR16RegClass;
6027     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6028     break;
6029   case MachineCombinerPattern::FNMULSUBS_OP1:
6030     Opc = AArch64::FNMADDSrrr;
6031     RC = &AArch64::FPR32RegClass;
6032     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6033     break;
6034   case MachineCombinerPattern::FNMULSUBD_OP1:
6035     Opc = AArch64::FNMADDDrrr;
6036     RC = &AArch64::FPR64RegClass;
6037     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6038     break;
6039 
6040   case MachineCombinerPattern::FMULSUBH_OP2:
6041     Opc = AArch64::FMSUBHrrr;
6042     RC = &AArch64::FPR16RegClass;
6043     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6044     break;
6045   case MachineCombinerPattern::FMULSUBS_OP2:
6046     Opc = AArch64::FMSUBSrrr;
6047     RC = &AArch64::FPR32RegClass;
6048     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6049     break;
6050   case MachineCombinerPattern::FMULSUBD_OP2:
6051     Opc = AArch64::FMSUBDrrr;
6052     RC = &AArch64::FPR64RegClass;
6053     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6054     break;
6055 
6056   case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
6057     Opc = AArch64::FMLSv1i32_indexed;
6058     RC = &AArch64::FPR32RegClass;
6059     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6060                            FMAInstKind::Indexed);
6061     break;
6062 
6063   case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
6064     Opc = AArch64::FMLSv1i64_indexed;
6065     RC = &AArch64::FPR64RegClass;
6066     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6067                            FMAInstKind::Indexed);
6068     break;
6069 
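  // For the FMLS ..._OP1 patterns the multiply is the first FSUB operand,
  // i.e. the root computes fmul(b, c) - a. Negate a with FNEG and emit an
  // FMLA instead: fmla(fneg(a), b, c) == fmul(b, c) - a.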
6070   case MachineCombinerPattern::FMLSv4f16_OP1:
6071   case MachineCombinerPattern::FMLSv4i16_indexed_OP1: {
6072     RC = &AArch64::FPR64RegClass;
6073     Register NewVR = MRI.createVirtualRegister(RC);
6074     MachineInstrBuilder MIB1 =
6075         BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv4f16), NewVR)
6076             .add(Root.getOperand(2));
6077     InsInstrs.push_back(MIB1);
6078     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6079     if (Pattern == MachineCombinerPattern::FMLSv4f16_OP1) {
6080       Opc = AArch64::FMLAv4f16;
6081       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6082                              FMAInstKind::Accumulator, &NewVR);
6083     } else {
6084       Opc = AArch64::FMLAv4i16_indexed;
6085       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6086                              FMAInstKind::Indexed, &NewVR);
6087     }
6088     break;
6089   }
6090   case MachineCombinerPattern::FMLSv4f16_OP2:
6091     RC = &AArch64::FPR64RegClass;
6092     Opc = AArch64::FMLSv4f16;
6093     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6094                            FMAInstKind::Accumulator);
6095     break;
6096   case MachineCombinerPattern::FMLSv4i16_indexed_OP2:
6097     RC = &AArch64::FPR64RegClass;
6098     Opc = AArch64::FMLSv4i16_indexed;
6099     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6100                            FMAInstKind::Indexed);
6101     break;
6102 
6103   case MachineCombinerPattern::FMLSv2f32_OP2:
6104   case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
6105     RC = &AArch64::FPR64RegClass;
6106     if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
6107       Opc = AArch64::FMLSv2i32_indexed;
6108       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6109                              FMAInstKind::Indexed);
6110     } else {
6111       Opc = AArch64::FMLSv2f32;
6112       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6113                              FMAInstKind::Accumulator);
6114     }
6115     break;
6116 
6117   case MachineCombinerPattern::FMLSv8f16_OP1:
6118   case MachineCombinerPattern::FMLSv8i16_indexed_OP1: {
6119     RC = &AArch64::FPR128RegClass;
6120     Register NewVR = MRI.createVirtualRegister(RC);
6121     MachineInstrBuilder MIB1 =
6122         BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv8f16), NewVR)
6123             .add(Root.getOperand(2));
6124     InsInstrs.push_back(MIB1);
6125     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6126     if (Pattern == MachineCombinerPattern::FMLSv8f16_OP1) {
6127       Opc = AArch64::FMLAv8f16;
6128       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6129                              FMAInstKind::Accumulator, &NewVR);
6130     } else {
6131       Opc = AArch64::FMLAv8i16_indexed;
6132       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6133                              FMAInstKind::Indexed, &NewVR);
6134     }
6135     break;
6136   }
6137   case MachineCombinerPattern::FMLSv8f16_OP2:
6138     RC = &AArch64::FPR128RegClass;
6139     Opc = AArch64::FMLSv8f16;
6140     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6141                            FMAInstKind::Accumulator);
6142     break;
6143   case MachineCombinerPattern::FMLSv8i16_indexed_OP2:
6144     RC = &AArch64::FPR128RegClass;
6145     Opc = AArch64::FMLSv8i16_indexed;
6146     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6147                            FMAInstKind::Indexed);
6148     break;
6149 
6150   case MachineCombinerPattern::FMLSv2f64_OP2:
6151   case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
6152     RC = &AArch64::FPR128RegClass;
6153     if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
6154       Opc = AArch64::FMLSv2i64_indexed;
6155       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6156                              FMAInstKind::Indexed);
6157     } else {
6158       Opc = AArch64::FMLSv2f64;
6159       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6160                              FMAInstKind::Accumulator);
6161     }
6162     break;
6163 
6164   case MachineCombinerPattern::FMLSv4f32_OP2:
6165   case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
6166     RC = &AArch64::FPR128RegClass;
6167     if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
6168       Opc = AArch64::FMLSv4i32_indexed;
6169       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6170                              FMAInstKind::Indexed);
6171     } else {
6172       Opc = AArch64::FMLSv4f32;
6173       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6174                              FMAInstKind::Accumulator);
6175     }
6176     break;
6177   case MachineCombinerPattern::FMLSv2f32_OP1:
6178   case MachineCombinerPattern::FMLSv2i32_indexed_OP1: {
6179     RC = &AArch64::FPR64RegClass;
6180     Register NewVR = MRI.createVirtualRegister(RC);
6181     MachineInstrBuilder MIB1 =
6182         BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f32), NewVR)
6183             .add(Root.getOperand(2));
6184     InsInstrs.push_back(MIB1);
6185     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6186     if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP1) {
6187       Opc = AArch64::FMLAv2i32_indexed;
6188       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6189                              FMAInstKind::Indexed, &NewVR);
6190     } else {
6191       Opc = AArch64::FMLAv2f32;
6192       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6193                              FMAInstKind::Accumulator, &NewVR);
6194     }
6195     break;
6196   }
6197   case MachineCombinerPattern::FMLSv4f32_OP1:
6198   case MachineCombinerPattern::FMLSv4i32_indexed_OP1: {
6199     RC = &AArch64::FPR128RegClass;
6200     Register NewVR = MRI.createVirtualRegister(RC);
6201     MachineInstrBuilder MIB1 =
6202         BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv4f32), NewVR)
6203             .add(Root.getOperand(2));
6204     InsInstrs.push_back(MIB1);
6205     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6206     if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP1) {
6207       Opc = AArch64::FMLAv4i32_indexed;
6208       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6209                              FMAInstKind::Indexed, &NewVR);
6210     } else {
6211       Opc = AArch64::FMLAv4f32;
6212       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6213                              FMAInstKind::Accumulator, &NewVR);
6214     }
6215     break;
6216   }
6217   case MachineCombinerPattern::FMLSv2f64_OP1:
6218   case MachineCombinerPattern::FMLSv2i64_indexed_OP1: {
6219     RC = &AArch64::FPR128RegClass;
6220     Register NewVR = MRI.createVirtualRegister(RC);
6221     MachineInstrBuilder MIB1 =
6222         BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f64), NewVR)
6223             .add(Root.getOperand(2));
6224     InsInstrs.push_back(MIB1);
6225     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6226     if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP1) {
6227       Opc = AArch64::FMLAv2i64_indexed;
6228       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6229                              FMAInstKind::Indexed, &NewVR);
6230     } else {
6231       Opc = AArch64::FMLAv2f64;
6232       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6233                              FMAInstKind::Accumulator, &NewVR);
6234     }
6235     break;
6236   }
6237   case MachineCombinerPattern::FMULv2i32_indexed_OP1:
6238   case MachineCombinerPattern::FMULv2i32_indexed_OP2: {
6239     unsigned IdxDupOp =
6240         (Pattern == MachineCombinerPattern::FMULv2i32_indexed_OP1) ? 1 : 2;
6241     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv2i32_indexed,
6242                        &AArch64::FPR128RegClass, MRI);
6243     break;
6244   }
6245   case MachineCombinerPattern::FMULv2i64_indexed_OP1:
6246   case MachineCombinerPattern::FMULv2i64_indexed_OP2: {
6247     unsigned IdxDupOp =
6248         (Pattern == MachineCombinerPattern::FMULv2i64_indexed_OP1) ? 1 : 2;
6249     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv2i64_indexed,
6250                        &AArch64::FPR128RegClass, MRI);
6251     break;
6252   }
6253   case MachineCombinerPattern::FMULv4i16_indexed_OP1:
6254   case MachineCombinerPattern::FMULv4i16_indexed_OP2: {
6255     unsigned IdxDupOp =
6256         (Pattern == MachineCombinerPattern::FMULv4i16_indexed_OP1) ? 1 : 2;
6257     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv4i16_indexed,
6258                        &AArch64::FPR128_loRegClass, MRI);
6259     break;
6260   }
6261   case MachineCombinerPattern::FMULv4i32_indexed_OP1:
6262   case MachineCombinerPattern::FMULv4i32_indexed_OP2: {
6263     unsigned IdxDupOp =
6264         (Pattern == MachineCombinerPattern::FMULv4i32_indexed_OP1) ? 1 : 2;
6265     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv4i32_indexed,
6266                        &AArch64::FPR128RegClass, MRI);
6267     break;
6268   }
6269   case MachineCombinerPattern::FMULv8i16_indexed_OP1:
6270   case MachineCombinerPattern::FMULv8i16_indexed_OP2: {
6271     unsigned IdxDupOp =
6272         (Pattern == MachineCombinerPattern::FMULv8i16_indexed_OP1) ? 1 : 2;
6273     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv8i16_indexed,
6274                        &AArch64::FPR128_loRegClass, MRI);
6275     break;
6276   }
6277   } // end switch (Pattern)
6278   // Record MUL and ADD/SUB for deletion
6279   if (MUL)
6280     DelInstrs.push_back(MUL);
6281   DelInstrs.push_back(&Root);
6282 }
6283 
6284 /// Replace csincr-branch sequence by simple conditional branch
6285 ///
6286 /// Examples:
6287 /// 1. \code
6288 ///   csinc  w9, wzr, wzr, <condition code>
6289 ///   tbnz   w9, #0, 0x44
6290 ///    \endcode
6291 /// to
6292 ///    \code
6293 ///   b.<inverted condition code>
6294 ///    \endcode
6295 ///
6296 /// 2. \code
6297 ///   csinc w9, wzr, wzr, <condition code>
6298 ///   tbz   w9, #0, 0x44
6299 ///    \endcode
6300 /// to
6301 ///    \code
6302 ///   b.<condition code>
6303 ///    \endcode
6304 ///
6305 /// Replace compare and branch sequence by TBZ/TBNZ instruction when the
6306 /// compare's constant operand is power of 2.
6307 ///
6308 /// Examples:
6309 ///    \code
6310 ///   and  w8, w8, #0x400
6311 ///   cbnz w8, L1
6312 ///    \endcode
6313 /// to
6314 ///    \code
6315 ///   tbnz w8, #10, L1
6316 ///    \endcode
6317 ///
6318 /// \param  MI Conditional Branch
6319 /// \return True when the simple conditional branch is generated
6320 ///
6321 bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
6322   bool IsNegativeBranch = false;
6323   bool IsTestAndBranch = false;
6324   unsigned TargetBBInMI = 0;
6325   switch (MI.getOpcode()) {
6326   default:
6327     llvm_unreachable("Unknown branch instruction?");
6328   case AArch64::Bcc:
6329     return false;
6330   case AArch64::CBZW:
6331   case AArch64::CBZX:
6332     TargetBBInMI = 1;
6333     break;
6334   case AArch64::CBNZW:
6335   case AArch64::CBNZX:
6336     TargetBBInMI = 1;
6337     IsNegativeBranch = true;
6338     break;
6339   case AArch64::TBZW:
6340   case AArch64::TBZX:
6341     TargetBBInMI = 2;
6342     IsTestAndBranch = true;
6343     break;
6344   case AArch64::TBNZW:
6345   case AArch64::TBNZX:
6346     TargetBBInMI = 2;
6347     IsNegativeBranch = true;
6348     IsTestAndBranch = true;
6349     break;
6350   }
6351   // So we increment a zero register and test for bits other
6352   // than bit 0? Conservatively bail out in case the verifier
6353   // missed this case.
6354   if (IsTestAndBranch && MI.getOperand(1).getImm())
6355     return false;
6356 
6357   // Find Definition.
  assert(MI.getParent() && "Incomplete machine instruction");
6359   MachineBasicBlock *MBB = MI.getParent();
6360   MachineFunction *MF = MBB->getParent();
6361   MachineRegisterInfo *MRI = &MF->getRegInfo();
6362   Register VReg = MI.getOperand(0).getReg();
6363   if (!Register::isVirtualRegister(VReg))
6364     return false;
6365 
6366   MachineInstr *DefMI = MRI->getVRegDef(VReg);
6367 
6368   // Look through COPY instructions to find definition.
6369   while (DefMI->isCopy()) {
6370     Register CopyVReg = DefMI->getOperand(1).getReg();
6371     if (!MRI->hasOneNonDBGUse(CopyVReg))
6372       return false;
6373     if (!MRI->hasOneDef(CopyVReg))
6374       return false;
6375     DefMI = MRI->getVRegDef(CopyVReg);
6376   }
6377 
6378   switch (DefMI->getOpcode()) {
6379   default:
6380     return false;
6381   // Fold AND into a TBZ/TBNZ if constant operand is power of 2.
6382   case AArch64::ANDWri:
6383   case AArch64::ANDXri: {
6384     if (IsTestAndBranch)
6385       return false;
6386     if (DefMI->getParent() != MBB)
6387       return false;
6388     if (!MRI->hasOneNonDBGUse(VReg))
6389       return false;
6390 
6391     bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
6392     uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
6393         DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
6394     if (!isPowerOf2_64(Mask))
6395       return false;
6396 
6397     MachineOperand &MO = DefMI->getOperand(1);
6398     Register NewReg = MO.getReg();
6399     if (!Register::isVirtualRegister(NewReg))
6400       return false;
6401 
6402     assert(!MRI->def_empty(NewReg) && "Register must be defined.");
6403 
6404     MachineBasicBlock &RefToMBB = *MBB;
6405     MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
6406     DebugLoc DL = MI.getDebugLoc();
6407     unsigned Imm = Log2_64(Mask);
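    // E.g., for "and w8, w8, #0x400" the decoded mask is 0x400, so Imm is 10
    // and the cb(n)z is rewritten as "tb(n)z w8, #10, <target>".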
6408     unsigned Opc = (Imm < 32)
6409                        ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
6410                        : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
6411     MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
6412                               .addReg(NewReg)
6413                               .addImm(Imm)
6414                               .addMBB(TBB);
    // Register lives on into the TBZ/TBNZ now.
6416     MO.setIsKill(false);
6417 
    // For immediates smaller than 32, we need to use the 32-bit
    // variant (W) in all cases. Indeed the 64-bit variant cannot
    // encode them.
6421     // Therefore, if the input register is 64-bit, we need to take the
6422     // 32-bit sub-part.
6423     if (!Is32Bit && Imm < 32)
6424       NewMI->getOperand(0).setSubReg(AArch64::sub_32);
6425     MI.eraseFromParent();
6426     return true;
6427   }
6428   // Look for CSINC
6429   case AArch64::CSINCWr:
6430   case AArch64::CSINCXr: {
6431     if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
6432           DefMI->getOperand(2).getReg() == AArch64::WZR) &&
6433         !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
6434           DefMI->getOperand(2).getReg() == AArch64::XZR))
6435       return false;
6436 
6437     if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
6438       return false;
6439 
6440     AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
6441     // Convert only when the condition code is not modified between
6442     // the CSINC and the branch. The CC may be used by other
6443     // instructions in between.
6444     if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
6445       return false;
6446     MachineBasicBlock &RefToMBB = *MBB;
6447     MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
6448     DebugLoc DL = MI.getDebugLoc();
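    // "csinc wd, wzr, wzr, cc" yields 1 exactly when cc is false, so a
    // CBNZ/TBNZ on the result fires when cc is false and must branch on the
    // inverted condition; CBZ/TBZ fires when cc is true and keeps cc as-is.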
6449     if (IsNegativeBranch)
6450       CC = AArch64CC::getInvertedCondCode(CC);
6451     BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
6452     MI.eraseFromParent();
6453     return true;
6454   }
6455   }
6456 }
6457 
6458 std::pair<unsigned, unsigned>
6459 AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
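  // Split TF into the mutually exclusive "fragment" part (MO_PAGE, MO_G0-G3,
  // MO_HI12, ...) and the remaining orthogonal bitmask flags (MO_GOT, MO_NC,
  // MO_TLS, ...).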
6460   const unsigned Mask = AArch64II::MO_FRAGMENT;
6461   return std::make_pair(TF & Mask, TF & ~Mask);
6462 }
6463 
6464 ArrayRef<std::pair<unsigned, const char *>>
6465 AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
6466   using namespace AArch64II;
6467 
6468   static const std::pair<unsigned, const char *> TargetFlags[] = {
6469       {MO_PAGE, "aarch64-page"}, {MO_PAGEOFF, "aarch64-pageoff"},
6470       {MO_G3, "aarch64-g3"},     {MO_G2, "aarch64-g2"},
6471       {MO_G1, "aarch64-g1"},     {MO_G0, "aarch64-g0"},
6472       {MO_HI12, "aarch64-hi12"}};
6473   return makeArrayRef(TargetFlags);
6474 }
6475 
6476 ArrayRef<std::pair<unsigned, const char *>>
6477 AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
6478   using namespace AArch64II;
6479 
6480   static const std::pair<unsigned, const char *> TargetFlags[] = {
6481       {MO_COFFSTUB, "aarch64-coffstub"},
6482       {MO_GOT, "aarch64-got"},
6483       {MO_NC, "aarch64-nc"},
6484       {MO_S, "aarch64-s"},
6485       {MO_TLS, "aarch64-tls"},
6486       {MO_DLLIMPORT, "aarch64-dllimport"},
6487       {MO_PREL, "aarch64-prel"},
6488       {MO_TAGGED, "aarch64-tagged"}};
6489   return makeArrayRef(TargetFlags);
6490 }
6491 
6492 ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
6493 AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const {
6494   static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
6495       {{MOSuppressPair, "aarch64-suppress-pair"},
6496        {MOStridedAccess, "aarch64-strided-access"}};
6497   return makeArrayRef(TargetFlags);
6498 }
6499 
6500 /// Constants defining how certain sequences should be outlined.
6501 /// This encompasses how an outlined function should be called, and what kind of
6502 /// frame should be emitted for that outlined function.
6503 ///
6504 /// \p MachineOutlinerDefault implies that the function should be called with
6505 /// a save and restore of LR to the stack.
6506 ///
6507 /// That is,
6508 ///
6509 /// I1     Save LR                    OUTLINED_FUNCTION:
6510 /// I2 --> BL OUTLINED_FUNCTION       I1
6511 /// I3     Restore LR                 I2
6512 ///                                   I3
6513 ///                                   RET
6514 ///
6515 /// * Call construction overhead: 3 (save + BL + restore)
6516 /// * Frame construction overhead: 1 (ret)
6517 /// * Requires stack fixups? Yes
6518 ///
6519 /// \p MachineOutlinerTailCall implies that the function is being created from
6520 /// a sequence of instructions ending in a return.
6521 ///
6522 /// That is,
6523 ///
6524 /// I1                             OUTLINED_FUNCTION:
6525 /// I2 --> B OUTLINED_FUNCTION     I1
6526 /// RET                            I2
6527 ///                                RET
6528 ///
6529 /// * Call construction overhead: 1 (B)
6530 /// * Frame construction overhead: 0 (Return included in sequence)
6531 /// * Requires stack fixups? No
6532 ///
6533 /// \p MachineOutlinerNoLRSave implies that the function should be called using
6534 /// a BL instruction, but doesn't require LR to be saved and restored. This
6535 /// happens when LR is known to be dead.
6536 ///
6537 /// That is,
6538 ///
6539 /// I1                                OUTLINED_FUNCTION:
6540 /// I2 --> BL OUTLINED_FUNCTION       I1
6541 /// I3                                I2
6542 ///                                   I3
6543 ///                                   RET
6544 ///
6545 /// * Call construction overhead: 1 (BL)
6546 /// * Frame construction overhead: 1 (RET)
6547 /// * Requires stack fixups? No
6548 ///
6549 /// \p MachineOutlinerThunk implies that the function is being created from
6550 /// a sequence of instructions ending in a call. The outlined function is
6551 /// called with a BL instruction, and the outlined function tail-calls the
6552 /// original call destination.
6553 ///
6554 /// That is,
6555 ///
6556 /// I1                                OUTLINED_FUNCTION:
6557 /// I2 --> BL OUTLINED_FUNCTION       I1
6558 /// BL f                              I2
6559 ///                                   B f
6560 /// * Call construction overhead: 1 (BL)
6561 /// * Frame construction overhead: 0
6562 /// * Requires stack fixups? No
6563 ///
6564 /// \p MachineOutlinerRegSave implies that the function should be called with a
6565 /// save and restore of LR to an available register. This allows us to avoid
6566 /// stack fixups. Note that this outlining variant is compatible with the
6567 /// NoLRSave case.
6568 ///
6569 /// That is,
6570 ///
6571 /// I1     Save LR                    OUTLINED_FUNCTION:
6572 /// I2 --> BL OUTLINED_FUNCTION       I1
6573 /// I3     Restore LR                 I2
6574 ///                                   I3
6575 ///                                   RET
6576 ///
6577 /// * Call construction overhead: 3 (save + BL + restore)
6578 /// * Frame construction overhead: 1 (ret)
6579 /// * Requires stack fixups? No
6580 enum MachineOutlinerClass {
6581   MachineOutlinerDefault,  /// Emit a save, restore, call, and return.
6582   MachineOutlinerTailCall, /// Only emit a branch.
6583   MachineOutlinerNoLRSave, /// Emit a call and return.
6584   MachineOutlinerThunk,    /// Emit a call and tail-call.
6585   MachineOutlinerRegSave   /// Same as default, but save to a register.
6586 };
6587 
6588 enum MachineOutlinerMBBFlags {
6589   LRUnavailableSomewhere = 0x2,
6590   HasCalls = 0x4,
6591   UnsafeRegsDead = 0x8
6592 };
6593 
6594 unsigned
6595 AArch64InstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
6596   assert(C.LRUWasSet && "LRU wasn't set?");
6597   MachineFunction *MF = C.getMF();
6598   const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
6599       MF->getSubtarget().getRegisterInfo());
6600 
6601   // Check if there is an available register across the sequence that we can
6602   // use.
6603   for (unsigned Reg : AArch64::GPR64RegClass) {
6604     if (!ARI->isReservedReg(*MF, Reg) &&
6605         Reg != AArch64::LR &&  // LR is not reserved, but don't use it.
6606         Reg != AArch64::X16 && // X16 is not guaranteed to be preserved.
6607         Reg != AArch64::X17 && // Ditto for X17.
6608         C.LRU.available(Reg) && C.UsedInSequence.available(Reg))
6609       return Reg;
6610   }
6611 
6612   // No suitable register. Return 0.
6613   return 0u;
6614 }
6615 
6616 static bool
6617 outliningCandidatesSigningScopeConsensus(const outliner::Candidate &a,
6618                                          const outliner::Candidate &b) {
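  // Querying with both argument values compares the full sign-return-address
  // scope of the two candidates (sign only when LR is spilled vs. always).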
6619   const auto &MFIa = a.getMF()->getInfo<AArch64FunctionInfo>();
6620   const auto &MFIb = b.getMF()->getInfo<AArch64FunctionInfo>();
6621 
  return MFIa->shouldSignReturnAddress(false) ==
             MFIb->shouldSignReturnAddress(false) &&
         MFIa->shouldSignReturnAddress(true) ==
             MFIb->shouldSignReturnAddress(true);
6624 }
6625 
6626 static bool
6627 outliningCandidatesSigningKeyConsensus(const outliner::Candidate &a,
6628                                        const outliner::Candidate &b) {
6629   const auto &MFIa = a.getMF()->getInfo<AArch64FunctionInfo>();
6630   const auto &MFIb = b.getMF()->getInfo<AArch64FunctionInfo>();
6631 
6632   return MFIa->shouldSignWithBKey() == MFIb->shouldSignWithBKey();
6633 }
6634 
6635 static bool outliningCandidatesV8_3OpsConsensus(const outliner::Candidate &a,
6636                                                 const outliner::Candidate &b) {
6637   const AArch64Subtarget &SubtargetA =
6638       a.getMF()->getSubtarget<AArch64Subtarget>();
6639   const AArch64Subtarget &SubtargetB =
6640       b.getMF()->getSubtarget<AArch64Subtarget>();
6641   return SubtargetA.hasV8_3aOps() == SubtargetB.hasV8_3aOps();
6642 }
6643 
6644 outliner::OutlinedFunction AArch64InstrInfo::getOutliningCandidateInfo(
6645     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
6646   outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
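  // Worst-case byte size of the candidate sequence; getInstSizeInBytes
  // returns an upper bound for each instruction.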
6647   unsigned SequenceSize =
6648       std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
6649                       [this](unsigned Sum, const MachineInstr &MI) {
6650                         return Sum + getInstSizeInBytes(MI);
6651                       });
6652   unsigned NumBytesToCreateFrame = 0;
6653 
6654   // We only allow outlining for functions having exactly matching return
6655   // address signing attributes, i.e., all share the same value for the
6656   // attribute "sign-return-address" and all share the same type of key they
6657   // are signed with.
  // Additionally we require all functions to simultaneously either support
  // v8.3a features or not. Otherwise an outlined function could get signed
  // using dedicated v8.3 instructions, and a call from a function that doesn't
  // support v8.3 instructions would therefore be invalid.
6662   if (std::adjacent_find(
6663           RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
6664           [](const outliner::Candidate &a, const outliner::Candidate &b) {
6665             // Return true if a and b are non-equal w.r.t. return address
6666             // signing or support of v8.3a features
6667             if (outliningCandidatesSigningScopeConsensus(a, b) &&
6668                 outliningCandidatesSigningKeyConsensus(a, b) &&
6669                 outliningCandidatesV8_3OpsConsensus(a, b)) {
6670               return false;
6671             }
6672             return true;
6673           }) != RepeatedSequenceLocs.end()) {
6674     return outliner::OutlinedFunction();
6675   }
6676 
  // Since at this point all candidates agree on their return address signing,
  // picking just one is fine. If the candidate functions potentially sign their
6679   // return addresses, the outlined function should do the same. Note that in
6680   // the case of "sign-return-address"="non-leaf" this is an assumption: It is
6681   // not certainly true that the outlined function will have to sign its return
6682   // address but this decision is made later, when the decision to outline
6683   // has already been made.
6684   // The same holds for the number of additional instructions we need: On
6685   // v8.3a RET can be replaced by RETAA/RETAB and no AUT instruction is
6686   // necessary. However, at this point we don't know if the outlined function
6687   // will have a RET instruction so we assume the worst.
6688   const TargetRegisterInfo &TRI = getRegisterInfo();
6689   if (FirstCand.getMF()
6690           ->getInfo<AArch64FunctionInfo>()
6691           ->shouldSignReturnAddress(true)) {
    // One PAC and one AUT instruction.
6693     NumBytesToCreateFrame += 8;
6694 
    // We have to check if sp modifying instructions would get outlined.
    // If so, we only allow outlining if sp is unchanged overall: matching
    // sub and add instructions are okay to outline; all other sp
    // modifications are not.
6699     auto hasIllegalSPModification = [&TRI](outliner::Candidate &C) {
6700       int SPValue = 0;
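      // E.g., a "sub sp, sp, #48" later matched by "add sp, sp, #48" nets to
      // zero and is fine; any unmatched or non-add/sub SP write is rejected.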
6701       MachineBasicBlock::iterator MBBI = C.front();
6702       for (;;) {
6703         if (MBBI->modifiesRegister(AArch64::SP, &TRI)) {
6704           switch (MBBI->getOpcode()) {
6705           case AArch64::ADDXri:
6706           case AArch64::ADDWri:
6707             assert(MBBI->getNumOperands() == 4 && "Wrong number of operands");
6708             assert(MBBI->getOperand(2).isImm() &&
6709                    "Expected operand to be immediate");
6710             assert(MBBI->getOperand(1).isReg() &&
6711                    "Expected operand to be a register");
6712             // Check if the add just increments sp. If so, we search for
6713             // matching sub instructions that decrement sp. If not, the
            // modification is illegal.
6715             if (MBBI->getOperand(1).getReg() == AArch64::SP)
6716               SPValue += MBBI->getOperand(2).getImm();
6717             else
6718               return true;
6719             break;
6720           case AArch64::SUBXri:
6721           case AArch64::SUBWri:
6722             assert(MBBI->getNumOperands() == 4 && "Wrong number of operands");
6723             assert(MBBI->getOperand(2).isImm() &&
6724                    "Expected operand to be immediate");
6725             assert(MBBI->getOperand(1).isReg() &&
6726                    "Expected operand to be a register");
6727             // Check if the sub just decrements sp. If so, we search for
6728             // matching add instructions that increment sp. If not, the
            // modification is illegal.
6730             if (MBBI->getOperand(1).getReg() == AArch64::SP)
6731               SPValue -= MBBI->getOperand(2).getImm();
6732             else
6733               return true;
6734             break;
6735           default:
6736             return true;
6737           }
6738         }
6739         if (MBBI == C.back())
6740           break;
6741         ++MBBI;
6742       }
6743       if (SPValue)
6744         return true;
6745       return false;
6746     };
6747     // Remove candidates with illegal stack modifying instructions
6748     llvm::erase_if(RepeatedSequenceLocs, hasIllegalSPModification);
6749 
6750     // If the sequence doesn't have enough candidates left, then we're done.
6751     if (RepeatedSequenceLocs.size() < 2)
6752       return outliner::OutlinedFunction();
6753   }
6754 
6755   // Properties about candidate MBBs that hold for all of them.
6756   unsigned FlagsSetInAll = 0xF;
6757 
6758   // Compute liveness information for each candidate, and set FlagsSetInAll.
6759   std::for_each(RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
6760                 [&FlagsSetInAll](outliner::Candidate &C) {
6761                   FlagsSetInAll &= C.Flags;
6762                 });
6763 
6764   // According to the AArch64 Procedure Call Standard, the following are
6765   // undefined on entry/exit from a function call:
6766   //
6767   // * Registers x16, x17, (and thus w16, w17)
6768   // * Condition codes (and thus the NZCV register)
6769   //
  // Because of this, we can't outline any sequence of instructions where one
  // of these registers is live into/across it. Thus, we need to delete those
  // candidates.
6775   auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
6776     // If the unsafe registers in this block are all dead, then we don't need
6777     // to compute liveness here.
6778     if (C.Flags & UnsafeRegsDead)
6779       return false;
6780     C.initLRU(TRI);
6781     LiveRegUnits LRU = C.LRU;
6782     return (!LRU.available(AArch64::W16) || !LRU.available(AArch64::W17) ||
6783             !LRU.available(AArch64::NZCV));
6784   };
6785 
6786   // Are there any candidates where those registers are live?
6787   if (!(FlagsSetInAll & UnsafeRegsDead)) {
6788     // Erase every candidate that violates the restrictions above. (It could be
6789     // true that we have viable candidates, so it's not worth bailing out in
    // the case that, say, 1 out of 20 candidates violates the restrictions.)
6791     llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
6792 
6793     // If the sequence doesn't have enough candidates left, then we're done.
6794     if (RepeatedSequenceLocs.size() < 2)
6795       return outliner::OutlinedFunction();
6796   }
6797 
6798   // At this point, we have only "safe" candidates to outline. Figure out
6799   // frame + call instruction information.
6800 
6801   unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();
6802 
6803   // Helper lambda which sets call information for every candidate.
6804   auto SetCandidateCallInfo =
6805       [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
6806         for (outliner::Candidate &C : RepeatedSequenceLocs)
6807           C.setCallInfo(CallID, NumBytesForCall);
6808       };
6809 
6810   unsigned FrameID = MachineOutlinerDefault;
6811   NumBytesToCreateFrame += 4;
6812 
6813   bool HasBTI = any_of(RepeatedSequenceLocs, [](outliner::Candidate &C) {
6814     return C.getMF()->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement();
6815   });
6816 
  // We check to see if CFI instructions are present, and if they are
  // we count how many occur in the candidate sequence.
6819   unsigned CFICount = 0;
6820   MachineBasicBlock::iterator MBBI = RepeatedSequenceLocs[0].front();
6821   for (unsigned Loc = RepeatedSequenceLocs[0].getStartIdx();
6822        Loc < RepeatedSequenceLocs[0].getEndIdx() + 1; Loc++) {
6823     if (MBBI->isCFIInstruction())
6824       CFICount++;
6825     MBBI++;
6826   }
6827 
  // We compare the number of found CFI instructions to the number of CFI
  // instructions in the parent function for each candidate. We must check this
6830   // since if we outline one of the CFI instructions in a function, we have to
6831   // outline them all for correctness. If we do not, the address offsets will be
6832   // incorrect between the two sections of the program.
6833   for (outliner::Candidate &C : RepeatedSequenceLocs) {
6834     std::vector<MCCFIInstruction> CFIInstructions =
6835         C.getMF()->getFrameInstructions();
6836 
6837     if (CFICount > 0 && CFICount != CFIInstructions.size())
6838       return outliner::OutlinedFunction();
6839   }
6840 
  // Returns true if an instruction is safe to fix up, false otherwise.
6842   auto IsSafeToFixup = [this, &TRI](MachineInstr &MI) {
6843     if (MI.isCall())
6844       return true;
6845 
6846     if (!MI.modifiesRegister(AArch64::SP, &TRI) &&
6847         !MI.readsRegister(AArch64::SP, &TRI))
6848       return true;
6849 
6850     // Any modification of SP will break our code to save/restore LR.
6851     // FIXME: We could handle some instructions which add a constant
6852     // offset to SP, with a bit more work.
6853     if (MI.modifiesRegister(AArch64::SP, &TRI))
6854       return false;
6855 
6856     // At this point, we have a stack instruction that we might need to
6857     // fix up. We'll handle it if it's a load or store.
6858     if (MI.mayLoadOrStore()) {
6859       const MachineOperand *Base; // Filled with the base operand of MI.
6860       int64_t Offset;             // Filled with the offset of MI.
6861       bool OffsetIsScalable;
6862 
6863       // Does it allow us to offset the base operand and is the base the
6864       // register SP?
6865       if (!getMemOperandWithOffset(MI, Base, Offset, OffsetIsScalable, &TRI) ||
6866           !Base->isReg() || Base->getReg() != AArch64::SP)
6867         return false;
6868 
      // Fix-up code below assumes bytes.
6870       if (OffsetIsScalable)
6871         return false;
6872 
6873       // Find the minimum/maximum offset for this instruction and check
6874       // if fixing it up would be in range.
6875       int64_t MinOffset,
6876           MaxOffset;  // Unscaled offsets for the instruction.
6877       TypeSize Scale(0U, false); // The scale to multiply the offsets by.
6878       unsigned DummyWidth;
6879       getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset);
6880 
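      // The outlined frame spills LR, moving SP down by 16 bytes, so an
      // SP-relative access such as "ldr x0, [sp, #8]" must be rewritten as
      // "ldr x0, [sp, #24]" once outlined.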
6881       Offset += 16; // Update the offset to what it would be if we outlined.
6882       if (Offset < MinOffset * (int64_t)Scale.getFixedSize() ||
6883           Offset > MaxOffset * (int64_t)Scale.getFixedSize())
6884         return false;
6885 
6886       // It's in range, so we can outline it.
6887       return true;
6888     }
6889 
6890     // FIXME: Add handling for instructions like "add x0, sp, #8".
6891 
6892     // We can't fix it up, so don't outline it.
6893     return false;
6894   };
6895 
6896   // True if it's possible to fix up each stack instruction in this sequence.
6897   // Important for frames/call variants that modify the stack.
6898   bool AllStackInstrsSafe = std::all_of(
6899       FirstCand.front(), std::next(FirstCand.back()), IsSafeToFixup);
6900 
6901   // If the last instruction in any candidate is a terminator, then we should
6902   // tail call all of the candidates.
6903   if (RepeatedSequenceLocs[0].back()->isTerminator()) {
6904     FrameID = MachineOutlinerTailCall;
6905     NumBytesToCreateFrame = 0;
6906     SetCandidateCallInfo(MachineOutlinerTailCall, 4);
6907   }
6908 
6909   else if (LastInstrOpcode == AArch64::BL ||
6910            ((LastInstrOpcode == AArch64::BLR ||
6911              LastInstrOpcode == AArch64::BLRNoIP) &&
6912             !HasBTI)) {
6913     // FIXME: Do we need to check if the code after this uses the value of LR?
6914     FrameID = MachineOutlinerThunk;
6915     NumBytesToCreateFrame = 0;
6916     SetCandidateCallInfo(MachineOutlinerThunk, 4);
6917   }
6918 
6919   else {
6920     // We need to decide how to emit calls + frames. We can always emit the same
6921     // frame if we don't need to save to the stack. If we have to save to the
6922     // stack, then we need a different frame.
6923     unsigned NumBytesNoStackCalls = 0;
6924     std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
6925 
6926     // Check if we have to save LR.
6927     for (outliner::Candidate &C : RepeatedSequenceLocs) {
6928       C.initLRU(TRI);
6929 
6930       // If we have a noreturn caller, then we're going to be conservative and
6931       // say that we have to save LR. If we don't have a ret at the end of the
6932       // block, then we can't reason about liveness accurately.
6933       //
6934       // FIXME: We can probably do better than always disabling this in
6935       // noreturn functions by fixing up the liveness info.
6936       bool IsNoReturn =
6937           C.getMF()->getFunction().hasFnAttribute(Attribute::NoReturn);
6938 
6939       // Is LR available? If so, we don't need a save.
6940       if (C.LRU.available(AArch64::LR) && !IsNoReturn) {
6941         NumBytesNoStackCalls += 4;
6942         C.setCallInfo(MachineOutlinerNoLRSave, 4);
6943         CandidatesWithoutStackFixups.push_back(C);
6944       }
6945 
6946       // Is an unused register available? If so, we won't modify the stack, so
6947       // we can outline with the same frame type as those that don't save LR.
6948       else if (findRegisterToSaveLRTo(C)) {
6949         NumBytesNoStackCalls += 12;
6950         C.setCallInfo(MachineOutlinerRegSave, 12);
6951         CandidatesWithoutStackFixups.push_back(C);
6952       }
6953 
6954       // Is SP used in the sequence at all? If not, we don't have to modify
6955       // the stack, so we are guaranteed to get the same frame.
6956       else if (C.UsedInSequence.available(AArch64::SP)) {
6957         NumBytesNoStackCalls += 12;
6958         C.setCallInfo(MachineOutlinerDefault, 12);
6959         CandidatesWithoutStackFixups.push_back(C);
6960       }
6961 
6962       // If we outline this, we need to modify the stack. Pretend we don't
6963       // outline this by saving all of its bytes.
6964       else {
6965         NumBytesNoStackCalls += SequenceSize;
6966       }
6967     }
6968 
6969     // If there are no places where we have to save LR, then note that we
6970     // don't have to update the stack. Otherwise, give every candidate the
6971     // default call type, as long as it's safe to do so.
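    // That is, prefer the fixup-free variants whenever their combined call
    // overhead is no worse than giving every candidate the 12-byte
    // save/restore call, or when stack fixups are not safe at all.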
6972     if (!AllStackInstrsSafe ||
6973         NumBytesNoStackCalls <= RepeatedSequenceLocs.size() * 12) {
6974       RepeatedSequenceLocs = CandidatesWithoutStackFixups;
6975       FrameID = MachineOutlinerNoLRSave;
6976     } else {
6977       SetCandidateCallInfo(MachineOutlinerDefault, 12);
6978 
6979       // Bugzilla ID: 46767
6980       // TODO: Check if fixing up the stack more than once is safe so we can
6981       // outline these.
6982       //
6983       // An outline resulting in a caller that requires stack fixups at the
6984       // callsite to a callee that also requires stack fixups can happen when
6985       // there are no available registers at the candidate callsite for a
6986       // candidate that itself also has calls.
6987       //
      // In other words, if function_containing_sequence in the following
      // pseudo assembly requires that we save LR at the point of the call,
      // but there are no available registers, we save using SP. As a result
      // the SP offsets require stack fixups by multiples of 16.
6992       //
6993       // function_containing_sequence:
6994       //   ...
6995       //   save LR to SP <- Requires stack instr fixups in OUTLINED_FUNCTION_N
6996       //   call OUTLINED_FUNCTION_N
6997       //   restore LR from SP
6998       //   ...
6999       //
7000       // OUTLINED_FUNCTION_N:
7001       //   save LR to SP <- Requires stack instr fixups in OUTLINED_FUNCTION_N
7002       //   ...
7003       //   bl foo
7004       //   restore LR from SP
7005       //   ret
7006       //
7007       // Because the code to handle more than one stack fixup does not
7008       // currently have the proper checks for legality, these cases will assert
7009       // in the AArch64 MachineOutliner. This is because the code to do this
7010       // needs more hardening, testing, better checks that generated code is
      // legal, etc., and because it is only verified to handle a single pass of
7012       // stack fixup.
7013       //
7014       // The assert happens in AArch64InstrInfo::buildOutlinedFrame to catch
7015       // these cases until they are known to be handled. Bugzilla 46767 is
7016       // referenced in comments at the assert site.
7017       //
7018       // To avoid asserting (or generating non-legal code on noassert builds)
7019       // we remove all candidates which would need more than one stack fixup by
7020       // pruning the cases where the candidate has calls while also having no
7021       // available LR and having no available general purpose registers to copy
      // LR to (i.e., one extra stack save/restore).
7023       //
7024       if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
7025         erase_if(RepeatedSequenceLocs, [this](outliner::Candidate &C) {
7026           return (std::any_of(
7027                      C.front(), std::next(C.back()),
7028                      [](const MachineInstr &MI) { return MI.isCall(); })) &&
7029                  (!C.LRU.available(AArch64::LR) || !findRegisterToSaveLRTo(C));
7030         });
7031       }
7032     }
7033 
7034     // If we dropped all of the candidates, bail out here.
7035     if (RepeatedSequenceLocs.size() < 2) {
7036       RepeatedSequenceLocs.clear();
7037       return outliner::OutlinedFunction();
7038     }
7039   }
7040 
7041   // Does every candidate's MBB contain a call? If so, then we might have a call
7042   // in the range.
7043   if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
7044     // Check if the range contains a call. These require a save + restore of the
7045     // link register.
7046     bool ModStackToSaveLR = false;
7047     if (std::any_of(FirstCand.front(), FirstCand.back(),
7048                     [](const MachineInstr &MI) { return MI.isCall(); }))
7049       ModStackToSaveLR = true;
7050 
7051     // Handle the last instruction separately. If this is a tail call, then the
7052     // last instruction is a call. We don't want to save + restore in this case.
    // However, it is possible that the last instruction is a call without
7054     // it being valid to tail call this sequence. We should consider this as
7055     // well.
7056     else if (FrameID != MachineOutlinerThunk &&
7057              FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
7058       ModStackToSaveLR = true;
7059 
7060     if (ModStackToSaveLR) {
7061       // We can't fix up the stack. Bail out.
7062       if (!AllStackInstrsSafe) {
7063         RepeatedSequenceLocs.clear();
7064         return outliner::OutlinedFunction();
7065       }
7066 
7067       // Save + restore LR.
7068       NumBytesToCreateFrame += 8;
7069     }
7070   }
7071 
7072   // If we have CFI instructions, we can only outline if the outlined section
  // can be a tail call.
7074   if (FrameID != MachineOutlinerTailCall && CFICount > 0)
7075     return outliner::OutlinedFunction();
7076 
7077   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
7078                                     NumBytesToCreateFrame, FrameID);
7079 }

bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // Don't outline from functions with section markings; the program could
  // expect that all the code is in the named section.
  // FIXME: Allow outlining from multiple functions with the same section
  // marking.
  if (F.hasSection())
    return false;

  // Outlining from functions with redzones is unsafe since the outliner may
  // modify the stack. Check if hasRedZone is true or unknown; if yes, don't
  // outline from it.
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  if (!AFI || AFI->hasRedZone().getValueOr(true))
    return false;

  // FIXME: Teach the outliner to generate/handle Windows unwind info.
  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
    return false;

  // It's safe to outline from MF.
  return true;
}
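
// (Explanatory note: hasRedZone() returns an Optional, so the
// getValueOr(true) above deliberately treats "unknown" the same as "has a
// red zone", rejecting the function unless the red zone is positively known
// to be unused.)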

bool AArch64InstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                              unsigned &Flags) const {
  if (!TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags))
    return false;
  // Check if LR is available through all of the MBB. If it's not, then set
  // a flag.
  assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
         "Suitable Machine Function for outlining must track liveness");
  LiveRegUnits LRU(getRegisterInfo());

  std::for_each(MBB.rbegin(), MBB.rend(),
                [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });

  // Check if each of the unsafe registers is available...
  bool W16AvailableInBlock = LRU.available(AArch64::W16);
  bool W17AvailableInBlock = LRU.available(AArch64::W17);
  bool NZCVAvailableInBlock = LRU.available(AArch64::NZCV);
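  // (Explanatory note: W16/W17 are the bottom halves of X16/X17, the
  // intra-procedure-call scratch registers, and NZCV holds the condition
  // flags. A call to an outlined function may be routed through a
  // linker-inserted veneer, which is permitted to clobber these, hence the
  // special treatment here.)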

  // If all of these are dead (and not live out), we know we don't have to check
  // them later.
  if (W16AvailableInBlock && W17AvailableInBlock && NZCVAvailableInBlock)
    Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;

  // Now, add the live outs to the set.
  LRU.addLiveOuts(MBB);

  // If any of these registers is available in the MBB, but is also live out of
  // the block, then we know outlining is unsafe.
  if (W16AvailableInBlock && !LRU.available(AArch64::W16))
    return false;
  if (W17AvailableInBlock && !LRU.available(AArch64::W17))
    return false;
  if (NZCVAvailableInBlock && !LRU.available(AArch64::NZCV))
    return false;

  // Check if there's a call inside this MachineBasicBlock. If there is, then
  // set a flag.
  if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
    Flags |= MachineOutlinerMBBFlags::HasCalls;

  MachineFunction *MF = MBB.getParent();

  // In the event that we outline, we may have to save LR. If there is an
  // available register in the MBB, then we'll always save LR there. Check if
  // this is true.
  bool CanSaveLR = false;
  const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
      MF->getSubtarget().getRegisterInfo());

  // Check if there is an available register across the sequence that we can
  // use.
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (!ARI->isReservedReg(*MF, Reg) && Reg != AArch64::LR &&
        Reg != AArch64::X16 && Reg != AArch64::X17 && LRU.available(Reg)) {
      CanSaveLR = true;
      break;
    }
  }

  // Check if we have a register we can save LR to, and if LR was used
  // somewhere. If both of those things are true, then we need to evaluate the
  // safety of outlining stack instructions later.
  if (!CanSaveLR && !LRU.available(AArch64::LR))
    Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;

  return true;
}

outliner::InstrType
AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
                                   unsigned Flags) const {
  MachineInstr &MI = *MIT;
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>();

  // Don't outline anything used for return address signing. The outlined
  // function will get signed later if needed.
  switch (MI.getOpcode()) {
  case AArch64::PACIASP:
  case AArch64::PACIBSP:
  case AArch64::AUTIASP:
  case AArch64::AUTIBSP:
  case AArch64::RETAA:
  case AArch64::RETAB:
  case AArch64::EMITBKEY:
    return outliner::InstrType::Illegal;
  }

  // Don't outline LOHs.
  if (FuncInfo->getLOHRelated().count(&MI))
    return outliner::InstrType::Illegal;

  // We can only outline these if we will tail call the outlined function, or
  // fix up the CFI offsets. Currently, CFI instructions are outlined only if
  // they are in a tail call.
  //
  // FIXME: If the proper fixups for the offset are implemented, this should be
  // possible.
  if (MI.isCFIInstruction())
    return outliner::InstrType::Legal;

  // Don't allow debug values to impact outlining type.
  if (MI.isDebugInstr() || MI.isIndirectDebugValue())
    return outliner::InstrType::Invisible;

  // At this point, KILL instructions don't really tell us much so we can go
  // ahead and skip over them.
  if (MI.isKill())
    return outliner::InstrType::Invisible;

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {

    // Is this the end of a function?
    if (MI.getParent()->succ_empty())
      return outliner::InstrType::Legal;

    // It's not, so don't outline it.
    return outliner::InstrType::Illegal;
  }

  // Make sure none of the operands are un-outlinable.
  for (const MachineOperand &MOP : MI.operands()) {
    if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
        MOP.isTargetIndex())
      return outliner::InstrType::Illegal;

    // If it uses LR or W30 explicitly, then don't touch it.
    if (MOP.isReg() && !MOP.isImplicit() &&
        (MOP.getReg() == AArch64::LR || MOP.getReg() == AArch64::W30))
      return outliner::InstrType::Illegal;
  }

  // Special cases for instructions that can always be outlined, but will fail
  // the later tests, e.g., ADRPs, which are PC-relative and use LR, but can
  // always be outlined because they don't require a *specific* value to be in
  // LR.
  if (MI.getOpcode() == AArch64::ADRP)
    return outliner::InstrType::Legal;

  // If MI is a call we might be able to outline it. We don't want to outline
  // any calls that rely on the position of items on the stack. When we outline
  // something containing a call, we have to emit a save and restore of LR in
  // the outlined function. Currently, this always happens by saving LR to the
  // stack. Thus, if we outline, say, half the parameters for a function call
  // plus the call, then we'll break the callee's expectations for the layout
  // of the stack.
  //
  // FIXME: Allow calls to functions which construct a stack frame, as long
  // as they don't access arguments on the stack.
  // FIXME: Figure out some way to analyze functions defined in other modules.
  // We should be able to compute the memory usage based on the IR calling
  // convention, even if we can't see the definition.
  if (MI.isCall()) {
    // Get the function associated with the call. Look at each operand and find
    // the one that represents the callee and get its name.
    const Function *Callee = nullptr;
    for (const MachineOperand &MOP : MI.operands()) {
      if (MOP.isGlobal()) {
        Callee = dyn_cast<Function>(MOP.getGlobal());
        break;
      }
    }

    // Never outline calls to mcount.  There isn't any rule that would require
    // this, but the Linux kernel's "ftrace" feature depends on it.
    if (Callee && Callee->getName() == "\01_mcount")
      return outliner::InstrType::Illegal;

    // If we don't know anything about the callee, assume it depends on the
    // stack layout of the caller. In that case, it's only legal to outline
    // as a tail-call. Explicitly list the call instructions we know about so we
    // don't get unexpected results with call pseudo-instructions.
    auto UnknownCallOutlineType = outliner::InstrType::Illegal;
    if (MI.getOpcode() == AArch64::BLR ||
        MI.getOpcode() == AArch64::BLRNoIP || MI.getOpcode() == AArch64::BL)
      UnknownCallOutlineType = outliner::InstrType::LegalTerminator;

    if (!Callee)
      return UnknownCallOutlineType;

    // We have a function we have information about. Check if it's something we
    // can safely outline.
    MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);

    // We don't know what's going on with the callee at all. Don't touch it.
    if (!CalleeMF)
      return UnknownCallOutlineType;

    // Check if we know anything about the callee saves on the function. If we
    // don't, then don't touch it, since that implies that we haven't
    // computed anything about its stack frame yet.
    MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
    if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
        MFI.getNumObjects() > 0)
      return UnknownCallOutlineType;

    // At this point, we can say that CalleeMF ought not to pass anything on
    // the stack. Therefore, we can outline it.
    return outliner::InstrType::Legal;
  }

  // Don't outline positions.
  if (MI.isPosition())
    return outliner::InstrType::Illegal;

  // Don't touch the link register or W30.
  if (MI.readsRegister(AArch64::W30, &getRegisterInfo()) ||
      MI.modifiesRegister(AArch64::W30, &getRegisterInfo()))
    return outliner::InstrType::Illegal;

  // Don't outline BTI instructions, because that will prevent the outlining
  // site from being indirectly callable.
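  // (For reference: HINT #32, #34, #36 and #38 are the encodings of BTI,
  // BTI c, BTI j and BTI jc respectively.)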
  if (MI.getOpcode() == AArch64::HINT) {
    int64_t Imm = MI.getOperand(0).getImm();
    if (Imm == 32 || Imm == 34 || Imm == 36 || Imm == 38)
      return outliner::InstrType::Illegal;
  }

  return outliner::InstrType::Legal;
}

void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
  for (MachineInstr &MI : MBB) {
    const MachineOperand *Base;
    unsigned Width;
    int64_t Offset;
    bool OffsetIsScalable;

    // Is this a load or store with an immediate offset with SP as the base?
    if (!MI.mayLoadOrStore() ||
        !getMemOperandWithOffsetWidth(MI, Base, Offset, OffsetIsScalable, Width,
                                      &RI) ||
        (Base->isReg() && Base->getReg() != AArch64::SP))
      continue;

    // It is, so we have to fix it up.
    TypeSize Scale(0U, false);
    int64_t Dummy1, Dummy2;

    MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI);
    assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
    getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2);
    assert(Scale != 0 && "Unexpected opcode!");
    assert(!OffsetIsScalable && "Expected offset to be a byte offset");

    // We've pushed the return address to the stack, so add 16 to the offset.
    // This is safe, since we already checked if it would overflow when we
    // checked if this instruction was legal to outline.
    int64_t NewImm = (Offset + 16) / (int64_t)Scale.getFixedSize();
    StackOffsetOperand.setImm(NewImm);
  }
}
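
// (Worked example, illustrative: for "ldr x0, [sp, #8]" in an outlined body
// whose frame saves LR with "str x30, [sp, #-16]!", the byte offset becomes
// 8 + 16 = 24; with the 8-byte scale of LDRXui, the immediate stored on the
// operand is 24 / 8 = 3, i.e. the instruction becomes "ldr x0, [sp, #24]".)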

static void signOutlinedFunction(MachineFunction &MF, MachineBasicBlock &MBB,
                                 bool ShouldSignReturnAddr,
                                 bool ShouldSignReturnAddrWithAKey) {
  if (ShouldSignReturnAddr) {
    MachineBasicBlock::iterator MBBPAC = MBB.begin();
    MachineBasicBlock::iterator MBBAUT = MBB.getFirstTerminator();
    const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
    const TargetInstrInfo *TII = Subtarget.getInstrInfo();
    DebugLoc DL;

    if (MBBAUT != MBB.end())
      DL = MBBAUT->getDebugLoc();

    // At the very beginning of the basic block we insert the following,
    // depending on the key type:
    //
    // a_key:                   b_key:
    //    PACIASP                   EMITBKEY
    //    CFI_INSTRUCTION           PACIBSP
    //                              CFI_INSTRUCTION
    unsigned PACI;
    if (ShouldSignReturnAddrWithAKey) {
      PACI = Subtarget.hasPAuth() ? AArch64::PACIA : AArch64::PACIASP;
    } else {
      BuildMI(MBB, MBBPAC, DebugLoc(), TII->get(AArch64::EMITBKEY))
          .setMIFlag(MachineInstr::FrameSetup);
      PACI = Subtarget.hasPAuth() ? AArch64::PACIB : AArch64::PACIBSP;
    }

    auto MI = BuildMI(MBB, MBBPAC, DebugLoc(), TII->get(PACI));
    if (Subtarget.hasPAuth())
      MI.addReg(AArch64::LR, RegState::Define)
          .addReg(AArch64::LR)
          .addReg(AArch64::SP, RegState::InternalRead);
    MI.setMIFlag(MachineInstr::FrameSetup);

    unsigned CFIIndex =
        MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
    BuildMI(MBB, MBBPAC, DebugLoc(), TII->get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);

    // If v8.3a features are available we can replace a RET instruction with
    // RETAA or RETAB and omit the AUT instructions.
    if (Subtarget.hasPAuth() && MBBAUT != MBB.end() &&
        MBBAUT->getOpcode() == AArch64::RET) {
      BuildMI(MBB, MBBAUT, DL,
              TII->get(ShouldSignReturnAddrWithAKey ? AArch64::RETAA
                                                    : AArch64::RETAB))
          .copyImplicitOps(*MBBAUT);
      MBB.erase(MBBAUT);
    } else {
      BuildMI(MBB, MBBAUT, DL,
              TII->get(ShouldSignReturnAddrWithAKey ? AArch64::AUTIASP
                                                    : AArch64::AUTIBSP))
          .setMIFlag(MachineInstr::FrameDestroy);
    }
  }
}
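
// (Illustrative result for the a_key case: on a target with PAuth the body
// becomes
//
//   pacia x30, sp
//   .cfi_negate_ra_state
//   ...
//   retaa                    ; replaces the plain RET, no AUTIASP needed
//
// while targets without PAuth get PACIASP on entry and AUTIASP before the
// return.)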

void AArch64InstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {

  AArch64FunctionInfo *FI = MF.getInfo<AArch64FunctionInfo>();

  if (OF.FrameConstructionID == MachineOutlinerTailCall)
    FI->setOutliningStyle("Tail Call");
  else if (OF.FrameConstructionID == MachineOutlinerThunk) {
    // For thunk outlining, rewrite the last instruction from a call to a
    // tail-call.
    MachineInstr *Call = &*--MBB.instr_end();
    unsigned TailOpcode;
    if (Call->getOpcode() == AArch64::BL) {
      TailOpcode = AArch64::TCRETURNdi;
    } else {
      assert(Call->getOpcode() == AArch64::BLR ||
             Call->getOpcode() == AArch64::BLRNoIP);
      TailOpcode = AArch64::TCRETURNriALL;
    }
    MachineInstr *TC = BuildMI(MF, DebugLoc(), get(TailOpcode))
                           .add(Call->getOperand(0))
                           .addImm(0);
    MBB.insert(MBB.end(), TC);
    Call->eraseFromParent();

    FI->setOutliningStyle("Thunk");
  }

  bool IsLeafFunction = true;

  // Is there a call in the outlined range?
  auto IsNonTailCall = [](const MachineInstr &MI) {
    return MI.isCall() && !MI.isReturn();
  };

  if (llvm::any_of(MBB.instrs(), IsNonTailCall)) {
    // Fix up the instructions in the range, since we're going to modify the
    // stack.

    // Bugzilla ID: 46767
    // TODO: Check if fixing up twice is safe so we can outline these.
    assert(OF.FrameConstructionID != MachineOutlinerDefault &&
           "Can only fix up stack references once");
    fixupPostOutline(MBB);

    IsLeafFunction = false;

    // LR has to be a live in so that we can save it.
    if (!MBB.isLiveIn(AArch64::LR))
      MBB.addLiveIn(AArch64::LR);

    MachineBasicBlock::iterator It = MBB.begin();
    MachineBasicBlock::iterator Et = MBB.end();

    if (OF.FrameConstructionID == MachineOutlinerTailCall ||
        OF.FrameConstructionID == MachineOutlinerThunk)
      Et = std::prev(MBB.end());

    // Insert a save before the outlined region.
    MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
                                .addReg(AArch64::SP, RegState::Define)
                                .addReg(AArch64::LR)
                                .addReg(AArch64::SP)
                                .addImm(-16);
    It = MBB.insert(It, STRXpre);

    const TargetSubtargetInfo &STI = MF.getSubtarget();
    const MCRegisterInfo *MRI = STI.getRegisterInfo();
    unsigned DwarfReg = MRI->getDwarfRegNum(AArch64::LR, true);

    // Add a CFI saying the stack was moved 16 B down.
    int64_t StackPosEntry =
        MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 16));
    BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(StackPosEntry)
        .setMIFlags(MachineInstr::FrameSetup);

    // Add a CFI saying that the LR that we want to find is now 16 B higher than
    // before.
    int64_t LRPosEntry =
        MF.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg, -16));
    BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(LRPosEntry)
        .setMIFlags(MachineInstr::FrameSetup);

    // Insert a restore before the terminator for the function.
    MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
                                 .addReg(AArch64::SP, RegState::Define)
                                 .addReg(AArch64::LR, RegState::Define)
                                 .addReg(AArch64::SP)
                                 .addImm(16);
    Et = MBB.insert(Et, LDRXpost);
  }

  // If a bunch of candidates reach this point they must agree on their return
  // address signing. It is therefore enough to just consider the signing
  // behaviour of one of them.
  const auto &MFI = *OF.Candidates.front().getMF()->getInfo<AArch64FunctionInfo>();
  bool ShouldSignReturnAddr = MFI.shouldSignReturnAddress(!IsLeafFunction);

  // a_key is the default.
  bool ShouldSignReturnAddrWithAKey = !MFI.shouldSignWithBKey();

  // If this is a tail call outlined function, then there's already a return.
  if (OF.FrameConstructionID == MachineOutlinerTailCall ||
      OF.FrameConstructionID == MachineOutlinerThunk) {
    signOutlinedFunction(MF, MBB, ShouldSignReturnAddr,
                         ShouldSignReturnAddrWithAKey);
    return;
  }

  // It's not a tail call, so we have to insert the return ourselves.

  // LR has to be a live in so that we can return to it.
  if (!MBB.isLiveIn(AArch64::LR))
    MBB.addLiveIn(AArch64::LR);

  MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET))
                          .addReg(AArch64::LR);
  MBB.insert(MBB.end(), ret);

  signOutlinedFunction(MF, MBB, ShouldSignReturnAddr,
                       ShouldSignReturnAddrWithAKey);

  FI->setOutliningStyle("Function");

  // Did we have to modify the stack by saving the link register?
  if (OF.FrameConstructionID != MachineOutlinerDefault)
    return;

  // We modified the stack.
  // Walk over the basic block and fix up all the stack accesses.
  fixupPostOutline(MBB);
}
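
// Illustrative shape (not emitted literally) of an outlined function whose
// body contains a call, ignoring return-address signing:
//
//   str x30, [sp, #-16]!     ; spill LR; SP-relative offsets grow by 16
//   .cfi_def_cfa_offset 16
//   .cfi_offset w30, -16
//   ...                      ; outlined instructions, stack refs fixed up
//   ldr x30, [sp], #16       ; reload LR
//   ret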

MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, const outliner::Candidate &C) const {

  // Are we tail calling?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // If yes, then we can just branch to the label.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::TCRETURNdi))
                            .addGlobalAddress(M.getNamedValue(MF.getName()))
                            .addImm(0));
    return It;
  }

  // Are we saving the link register?
  if (C.CallConstructionID == MachineOutlinerNoLRSave ||
      C.CallConstructionID == MachineOutlinerThunk) {
    // No, so just insert the call.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                            .addGlobalAddress(M.getNamedValue(MF.getName())));
    return It;
  }

  // We want to return the spot where we inserted the call.
  MachineBasicBlock::iterator CallPt;

  // Instructions for saving and restoring LR around the call instruction we're
  // going to insert.
  MachineInstr *Save;
  MachineInstr *Restore;
  // Can we save to a register?
  if (C.CallConstructionID == MachineOutlinerRegSave) {
    // FIXME: This logic should be sunk into a target-specific interface so that
    // we don't have to recompute the register.
    unsigned Reg = findRegisterToSaveLRTo(C);
    assert(Reg != 0 && "No callee-saved register available?");

    // LR has to be a live in so that we can save it.
    if (!MBB.isLiveIn(AArch64::LR))
      MBB.addLiveIn(AArch64::LR);

    // Save and restore LR from Reg.
    Save = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), Reg)
               .addReg(AArch64::XZR)
               .addReg(AArch64::LR)
               .addImm(0);
    Restore = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), AArch64::LR)
                .addReg(AArch64::XZR)
                .addReg(Reg)
                .addImm(0);
  } else {
    // We have the default case. Save and restore from SP.
    Save = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
               .addReg(AArch64::SP, RegState::Define)
               .addReg(AArch64::LR)
               .addReg(AArch64::SP)
               .addImm(-16);
    Restore = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
                  .addReg(AArch64::SP, RegState::Define)
                  .addReg(AArch64::LR, RegState::Define)
                  .addReg(AArch64::SP)
                  .addImm(16);
  }

  It = MBB.insert(It, Save);
  It++;

  // Insert the call.
  It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                          .addGlobalAddress(M.getNamedValue(MF.getName())));
  CallPt = It;
  It++;

  It = MBB.insert(It, Restore);
  return CallPt;
}
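
// (Illustrative call sequence for the MachineOutlinerRegSave case, with x9
// standing in for whichever free register findRegisterToSaveLRTo picks:
//
//   mov x9, x30              ; ORRXrs x9, xzr, lr, #0
//   bl OUTLINED_FUNCTION_N
//   mov x30, x9              ; ORRXrs lr, xzr, x9, #0
//
// The default case spills LR to the stack around the call instead.)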

bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
  MachineFunction &MF) const {
  return MF.getFunction().hasMinSize();
}

Optional<DestSourcePair>
AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {

  // AArch64::ORRWrs and AArch64::ORRXrs with a WZR/XZR source register and a
  // zero immediate operand are used as aliases for the MOV instruction.
  if (MI.getOpcode() == AArch64::ORRWrs &&
      MI.getOperand(1).getReg() == AArch64::WZR &&
      MI.getOperand(3).getImm() == 0x0) {
    return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
  }

  if (MI.getOpcode() == AArch64::ORRXrs &&
      MI.getOperand(1).getReg() == AArch64::XZR &&
      MI.getOperand(3).getImm() == 0x0) {
    return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
  }

  return None;
}
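
// (E.g. "orr w0, wzr, w1" with a zero shift amount is the canonical
// "mov w0, w1", so {w0, w1} is returned as the destination/source pair.)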

Optional<RegImmPair> AArch64InstrInfo::isAddImmediate(const MachineInstr &MI,
                                                      Register Reg) const {
  int Sign = 1;
  int64_t Offset = 0;

  // TODO: Handle cases where Reg is a super- or sub-register of the
  // destination register.
  const MachineOperand &Op0 = MI.getOperand(0);
  if (!Op0.isReg() || Reg != Op0.getReg())
    return None;

  switch (MI.getOpcode()) {
  default:
    return None;
  case AArch64::SUBWri:
  case AArch64::SUBXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    Sign *= -1;
    LLVM_FALLTHROUGH;
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri: {
    // TODO: Third operand can be a global address (usually some string).
    if (!MI.getOperand(0).isReg() || !MI.getOperand(1).isReg() ||
        !MI.getOperand(2).isImm())
      return None;
    int Shift = MI.getOperand(3).getImm();
    assert((Shift == 0 || Shift == 12) && "Shift can be either 0 or 12");
    Offset = Sign * (MI.getOperand(2).getImm() << Shift);
  }
  }
  return RegImmPair{MI.getOperand(1).getReg(), Offset};
}
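
// (E.g. querying x0 on "sub x0, x1, #16" yields {x1, -16}, and on
// "add x0, x1, #1, lsl #12" yields {x1, 4096}.)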

/// If the given ORR instruction is a copy, and \p DescribedReg overlaps with
/// the destination register then, if possible, describe the value in terms of
/// the source register.
static Optional<ParamLoadedValue>
describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
                       const TargetInstrInfo *TII,
                       const TargetRegisterInfo *TRI) {
  auto DestSrc = TII->isCopyInstr(MI);
  if (!DestSrc)
    return None;

  Register DestReg = DestSrc->Destination->getReg();
  Register SrcReg = DestSrc->Source->getReg();

  auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});

  // If the described register is the destination, just return the source.
  if (DestReg == DescribedReg)
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);

  // ORRWrs zero-extends to 64 bits, so we need to consider such cases.
  if (MI.getOpcode() == AArch64::ORRWrs &&
      TRI->isSuperRegister(DestReg, DescribedReg))
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);

  // We may need to describe the lower part of an ORRXrs move.
  if (MI.getOpcode() == AArch64::ORRXrs &&
      TRI->isSubRegister(DestReg, DescribedReg)) {
    Register SrcSubReg = TRI->getSubReg(SrcReg, AArch64::sub_32);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
  }

  assert(!TRI->isSuperOrSubRegisterEq(DestReg, DescribedReg) &&
         "Unhandled ORR[XW]rs copy case");

  return None;
}
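
// (E.g. for "mov w1, w0" (an ORRWrs copy), a request to describe x1 is
// answered with w0, since the write to w1 zero-extends into all of x1; for
// "mov x1, x0" (ORRXrs), a request for w1 is answered with w0.)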

Optional<ParamLoadedValue>
AArch64InstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  switch (MI.getOpcode()) {
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: {
    // MOVZWi may be used for producing zero-extended 32-bit immediates in
    // 64-bit parameters, so we need to consider super-registers.
    if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
      return None;

    if (!MI.getOperand(1).isImm())
      return None;
    int64_t Immediate = MI.getOperand(1).getImm();
    int Shift = MI.getOperand(2).getImm();
    return ParamLoadedValue(MachineOperand::CreateImm(Immediate << Shift),
                            nullptr);
  }
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    return describeORRLoadedValue(MI, Reg, this, TRI);
  }

  return TargetInstrInfo::describeLoadedValue(MI, Reg);
}

bool AArch64InstrInfo::isExtendLikelyToBeFolded(
    MachineInstr &ExtMI, MachineRegisterInfo &MRI) const {
  assert(ExtMI.getOpcode() == TargetOpcode::G_SEXT ||
         ExtMI.getOpcode() == TargetOpcode::G_ZEXT ||
         ExtMI.getOpcode() == TargetOpcode::G_ANYEXT);

  // Anyexts are nops.
  if (ExtMI.getOpcode() == TargetOpcode::G_ANYEXT)
    return true;

  Register DefReg = ExtMI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(DefReg))
    return false;

  // It's likely that a sext/zext as a G_PTR_ADD offset will be folded into an
  // addressing mode.
  auto *UserMI = &*MRI.use_instr_nodbg_begin(DefReg);
  return UserMI->getOpcode() == TargetOpcode::G_PTR_ADD;
}
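
// (E.g. a G_ZEXT whose only user is a G_PTR_ADD will likely be selected into
// an extended-register addressing mode such as "ldr x0, [x1, w2, uxtw]",
// making the extend effectively free.)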

uint64_t AArch64InstrInfo::getElementSizeForOpcode(unsigned Opc) const {
  return get(Opc).TSFlags & AArch64::ElementSizeMask;
}

bool AArch64InstrInfo::isPTestLikeOpcode(unsigned Opc) const {
  return get(Opc).TSFlags & AArch64::InstrFlagIsPTestLike;
}

bool AArch64InstrInfo::isWhileOpcode(unsigned Opc) const {
  return get(Opc).TSFlags & AArch64::InstrFlagIsWhile;
}

unsigned int
AArch64InstrInfo::getTailDuplicateSize(CodeGenOpt::Level OptLevel) const {
  return OptLevel >= CodeGenOpt::Aggressive ? 6 : 2;
}

unsigned llvm::getBLRCallOpcode(const MachineFunction &MF) {
  if (MF.getSubtarget<AArch64Subtarget>().hardenSlsBlr())
    return AArch64::BLRNoIP;
  else
    return AArch64::BLR;
}

#define GET_INSTRINFO_HELPERS
#define GET_INSTRMAP_INFO
#include "AArch64GenInstrInfo.inc"