1 //===- HexagonInstrInfo.cpp - Hexagon Instruction Information -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the Hexagon implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "HexagonInstrInfo.h"
14 #include "Hexagon.h"
15 #include "HexagonFrameLowering.h"
16 #include "HexagonHazardRecognizer.h"
17 #include "HexagonRegisterInfo.h"
18 #include "HexagonSubtarget.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/StringExtras.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/CodeGen/DFAPacketizer.h"
25 #include "llvm/CodeGen/LivePhysRegs.h"
26 #include "llvm/CodeGen/MachineBasicBlock.h"
27 #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineInstrBundle.h"
33 #include "llvm/CodeGen/MachineLoopInfo.h"
34 #include "llvm/CodeGen/MachineMemOperand.h"
35 #include "llvm/CodeGen/MachineOperand.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/MachineValueType.h"
38 #include "llvm/CodeGen/ScheduleDAG.h"
39 #include "llvm/CodeGen/TargetInstrInfo.h"
40 #include "llvm/CodeGen/TargetOpcodes.h"
41 #include "llvm/CodeGen/TargetRegisterInfo.h"
42 #include "llvm/CodeGen/TargetSubtargetInfo.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/MC/MCAsmInfo.h"
45 #include "llvm/MC/MCInstBuilder.h"
46 #include "llvm/MC/MCInstrDesc.h"
47 #include "llvm/MC/MCInstrItineraries.h"
48 #include "llvm/MC/MCRegisterInfo.h"
49 #include "llvm/Support/BranchProbability.h"
50 #include "llvm/Support/CommandLine.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/ErrorHandling.h"
53 #include "llvm/Support/MathExtras.h"
54 #include "llvm/Support/raw_ostream.h"
55 #include "llvm/Target/TargetMachine.h"
56 #include <cassert>
57 #include <cctype>
58 #include <cstdint>
59 #include <cstring>
60 #include <iterator>
61 #include <optional>
62 #include <string>
63 #include <utility>
64 
65 using namespace llvm;
66 
67 #define DEBUG_TYPE "hexagon-instrinfo"
68 
69 #define GET_INSTRINFO_CTOR_DTOR
70 #define GET_INSTRMAP_INFO
71 #include "HexagonDepTimingClasses.h"
72 #include "HexagonGenDFAPacketizer.inc"
73 #include "HexagonGenInstrInfo.inc"
74 
75 cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
76   cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
77                             "packetization boundary."));
78 
79 static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
80   cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));
81 
82 static cl::opt<bool> DisableNVSchedule(
83     "disable-hexagon-nv-schedule", cl::Hidden,
84     cl::desc("Disable schedule adjustment for new value stores."));
85 
86 static cl::opt<bool> EnableTimingClassLatency(
87   "enable-timing-class-latency", cl::Hidden, cl::init(false),
88   cl::desc("Enable timing class latency"));
89 
90 static cl::opt<bool> EnableALUForwarding(
91   "enable-alu-forwarding", cl::Hidden, cl::init(true),
92   cl::desc("Enable vec alu forwarding"));
93 
94 static cl::opt<bool> EnableACCForwarding(
95   "enable-acc-forwarding", cl::Hidden, cl::init(true),
96   cl::desc("Enable vec acc forwarding"));
97 
98 static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
99                                          cl::init(true), cl::Hidden,
100                                          cl::desc("branch relax asm"));
101 
102 static cl::opt<bool>
103     UseDFAHazardRec("dfa-hazard-rec", cl::init(true), cl::Hidden,
104                     cl::desc("Use the DFA based hazard recognizer."));
105 
106 /// Constants for Hexagon instructions.
107 const int Hexagon_MEMW_OFFSET_MAX = 4095;
108 const int Hexagon_MEMW_OFFSET_MIN = -4096;
109 const int Hexagon_MEMD_OFFSET_MAX = 8191;
110 const int Hexagon_MEMD_OFFSET_MIN = -8192;
111 const int Hexagon_MEMH_OFFSET_MAX = 2047;
112 const int Hexagon_MEMH_OFFSET_MIN = -2048;
113 const int Hexagon_MEMB_OFFSET_MAX = 1023;
114 const int Hexagon_MEMB_OFFSET_MIN = -1024;
115 const int Hexagon_ADDI_OFFSET_MAX = 32767;
116 const int Hexagon_ADDI_OFFSET_MIN = -32768;
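
// Illustration: these bound the signed immediate offsets of base+offset
// accesses of the corresponding size, e.g. a word access memw(Rs+#off) is
// expected to keep #off within [Hexagon_MEMW_OFFSET_MIN,
// Hexagon_MEMW_OFFSET_MAX].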
117 
118 // Pin the vtable to this file.
119 void HexagonInstrInfo::anchor() {}
120 
121 HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
122   : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
123     Subtarget(ST) {}
124 
125 namespace llvm {
126 namespace HexagonFUnits {
127   bool isSlot0Only(unsigned units);
128 }
129 }
130 
131 static bool isIntRegForSubInst(Register Reg) {
132   return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
133          (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
134 }
135 
136 static bool isDblRegForSubInst(Register Reg, const HexagonRegisterInfo &HRI) {
137   return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_lo)) &&
138          isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_hi));
139 }
140 
141 /// Calculate the number of instructions, excluding debug instructions.
142 static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB,
143                               MachineBasicBlock::const_instr_iterator MIE) {
144   unsigned Count = 0;
145   for (; MIB != MIE; ++MIB) {
146     if (!MIB->isDebugInstr())
147       ++Count;
148   }
149   return Count;
150 }
151 
152 // Check if the A2_tfrsi instruction is cheap or not. If the operand has
153 // to be constant-extended it is not cheap since it occupies two slots
154 // in a packet.
155 bool HexagonInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
156   // Enable the following steps only at Os/Oz
157   if (!(MI.getMF()->getFunction().hasOptSize()))
158     return MI.isAsCheapAsAMove();
159 
160   if (MI.getOpcode() == Hexagon::A2_tfrsi) {
161     auto Op = MI.getOperand(1);
162     // If the instruction has a global address as an operand, it is not
163     // cheap, since the operand will be constant-extended.
164     if (Op.isGlobal())
165       return false;
166     // If the instruction has an immediate operand wider than 16 bits, it
167     // will be constant-extended and hence is not cheap.
168     if (Op.isImm()) {
169       int64_t Imm = Op.getImm();
170       if (!isInt<16>(Imm))
171         return false;
172     }
173   }
174   return MI.isAsCheapAsAMove();
175 }
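
// Illustration: under -Os/-Oz, "A2_tfrsi Rd, #255" remains as cheap as a move
// since #255 fits in 16 signed bits, whereas an immediate such as #0x12345 or
// a global address would be constant-extended (taking an extra packet slot),
// so such a transfer is not reported as cheap.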
176 
177 // Do not sink floating point instructions that update the USR register.
178 // Example:
179 //    feclearexcept
180 //    F2_conv_w2sf
181 //    fetestexcept
182 // MachineSink sinks F2_conv_w2sf and we are not able to catch exceptions.
183 // TODO: On some of these floating point instructions, USR is marked as Use.
184 // In reality, these instructions also Def the USR. If USR is marked as Def,
185 // some of the assumptions in assembler packetization are broken.
186 bool HexagonInstrInfo::shouldSink(const MachineInstr &MI) const {
187   // Assumption: A floating point instruction that reads the USR will write
188   // the USR as well.
189   if (isFloat(MI) && MI.hasRegisterImplicitUseOperand(Hexagon::USR))
190     return false;
191   return true;
192 }
193 
194 /// Find the hardware loop instruction used to set up the specified loop.
195 /// On Hexagon, we have two instructions used to set up the hardware loop
196 /// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
197 /// to indicate the end of a loop.
198 MachineInstr *HexagonInstrInfo::findLoopInstr(MachineBasicBlock *BB,
199       unsigned EndLoopOp, MachineBasicBlock *TargetBB,
200       SmallPtrSet<MachineBasicBlock *, 8> &Visited) const {
201   unsigned LOOPi;
202   unsigned LOOPr;
203   if (EndLoopOp == Hexagon::ENDLOOP0) {
204     LOOPi = Hexagon::J2_loop0i;
205     LOOPr = Hexagon::J2_loop0r;
206   } else { // EndLoopOp == Hexagon::ENDLOOP1
207     LOOPi = Hexagon::J2_loop1i;
208     LOOPr = Hexagon::J2_loop1r;
209   }
210 
211   // The loop set-up instruction will be in a predecessor block
212   for (MachineBasicBlock *PB : BB->predecessors()) {
213     // If this block has already been visited, skip it.
214     if (!Visited.insert(PB).second)
215       continue;
216     if (PB == BB)
217       continue;
218     for (MachineInstr &I : llvm::reverse(PB->instrs())) {
219       unsigned Opc = I.getOpcode();
220       if (Opc == LOOPi || Opc == LOOPr)
221         return &I;
222       // We've reached a different loop, which means the loop0/1 instruction
223       // has been removed.
224       if (Opc == EndLoopOp && I.getOperand(0).getMBB() != TargetBB)
225         return nullptr;
226     }
227     // Check the predecessors for the LOOP instruction.
228     if (MachineInstr *Loop = findLoopInstr(PB, EndLoopOp, TargetBB, Visited))
229       return Loop;
230   }
231   return nullptr;
232 }
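
// Illustrative shape of what findLoopInstr pairs up (schematic MIR, not an
// exact dump):
//   bb.1 (preheader):  J2_loop0i %bb.2, 10      ; or J2_loop0r %bb.2, $rN
//   bb.2 (loop):       ...
//                      ENDLOOP0 %bb.2
// Starting from the block containing the ENDLOOP, the search walks predecessor
// blocks until it finds the matching LOOP0/LOOP1 set-up instruction, and gives
// up if it crosses a different ENDLOOP targeting another block.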
233 
234 /// Gather register def/uses from MI.
235 /// This treats possible (predicated) defs as actually happening ones
236 /// (conservatively).
237 static inline void parseOperands(const MachineInstr &MI,
238       SmallVectorImpl<Register> &Defs, SmallVectorImpl<Register> &Uses) {
239   Defs.clear();
240   Uses.clear();
241 
242   for (const MachineOperand &MO : MI.operands()) {
243     if (!MO.isReg())
244       continue;
245 
246     Register Reg = MO.getReg();
247     if (!Reg)
248       continue;
249 
250     if (MO.isUse())
251       Uses.push_back(MO.getReg());
252 
253     if (MO.isDef())
254       Defs.push_back(MO.getReg());
255   }
256 }
257 
258 // The pairing is position dependent, so callers check both orders (swapped).
259 static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
260   switch (Ga) {
261   case HexagonII::HSIG_None:
262   default:
263     return false;
264   case HexagonII::HSIG_L1:
265     return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
266   case HexagonII::HSIG_L2:
267     return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
268             Gb == HexagonII::HSIG_A);
269   case HexagonII::HSIG_S1:
270     return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
271             Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
272   case HexagonII::HSIG_S2:
273     return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
274             Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
275             Gb == HexagonII::HSIG_A);
276   case HexagonII::HSIG_A:
277     return (Gb == HexagonII::HSIG_A);
278   case HexagonII::HSIG_Compound:
279     return (Gb == HexagonII::HSIG_Compound);
280   }
281   return false;
282 }
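
// Illustrative pairings (position dependent, in (Ga, Gb) order): an HSIG_A
// sub-instruction is accepted as Gb next to any non-compound group, but as Ga
// it only pairs with another HSIG_A; HSIG_Compound pairs only with
// HSIG_Compound.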
283 
284 /// isLoadFromStackSlot - If the specified machine instruction is a direct
285 /// load from a stack slot, return the virtual or physical register number of
286 /// the destination along with the FrameIndex of the loaded stack slot.  If
287 /// not, return 0.  This predicate must return 0 if the instruction has
288 /// any side effects other than loading from the stack slot.
289 unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
290                                                int &FrameIndex) const {
291   switch (MI.getOpcode()) {
292     default:
293       break;
294     case Hexagon::L2_loadri_io:
295     case Hexagon::L2_loadrd_io:
296     case Hexagon::V6_vL32b_ai:
297     case Hexagon::V6_vL32b_nt_ai:
298     case Hexagon::V6_vL32Ub_ai:
299     case Hexagon::LDriw_pred:
300     case Hexagon::LDriw_ctr:
301     case Hexagon::PS_vloadrq_ai:
302     case Hexagon::PS_vloadrw_ai:
303     case Hexagon::PS_vloadrw_nt_ai: {
304       const MachineOperand OpFI = MI.getOperand(1);
305       if (!OpFI.isFI())
306         return 0;
307       const MachineOperand OpOff = MI.getOperand(2);
308       if (!OpOff.isImm() || OpOff.getImm() != 0)
309         return 0;
310       FrameIndex = OpFI.getIndex();
311       return MI.getOperand(0).getReg();
312     }
313 
314     case Hexagon::L2_ploadrit_io:
315     case Hexagon::L2_ploadrif_io:
316     case Hexagon::L2_ploadrdt_io:
317     case Hexagon::L2_ploadrdf_io: {
318       const MachineOperand OpFI = MI.getOperand(2);
319       if (!OpFI.isFI())
320         return 0;
321       const MachineOperand OpOff = MI.getOperand(3);
322       if (!OpOff.isImm() || OpOff.getImm() != 0)
323         return 0;
324       FrameIndex = OpFI.getIndex();
325       return MI.getOperand(0).getReg();
326     }
327   }
328 
329   return 0;
330 }
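
// Illustrative match (schematic MIR): for "$r1 = L2_loadri_io %stack.3, 0"
// this returns $r1 and sets FrameIndex to 3; a nonzero offset or a non-frame
// base address makes it return 0.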
331 
332 /// isStoreToStackSlot - If the specified machine instruction is a direct
333 /// store to a stack slot, return the virtual or physical register number of
334 /// the source reg along with the FrameIndex of the stack slot stored to.  If
335 /// not, return 0.  This predicate must return 0 if the instruction has
336 /// any side effects other than storing to the stack slot.
337 unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
338                                               int &FrameIndex) const {
339   switch (MI.getOpcode()) {
340     default:
341       break;
342     case Hexagon::S2_storerb_io:
343     case Hexagon::S2_storerh_io:
344     case Hexagon::S2_storeri_io:
345     case Hexagon::S2_storerd_io:
346     case Hexagon::V6_vS32b_ai:
347     case Hexagon::V6_vS32Ub_ai:
348     case Hexagon::STriw_pred:
349     case Hexagon::STriw_ctr:
350     case Hexagon::PS_vstorerq_ai:
351     case Hexagon::PS_vstorerw_ai: {
352       const MachineOperand &OpFI = MI.getOperand(0);
353       if (!OpFI.isFI())
354         return 0;
355       const MachineOperand &OpOff = MI.getOperand(1);
356       if (!OpOff.isImm() || OpOff.getImm() != 0)
357         return 0;
358       FrameIndex = OpFI.getIndex();
359       return MI.getOperand(2).getReg();
360     }
361 
362     case Hexagon::S2_pstorerbt_io:
363     case Hexagon::S2_pstorerbf_io:
364     case Hexagon::S2_pstorerht_io:
365     case Hexagon::S2_pstorerhf_io:
366     case Hexagon::S2_pstorerit_io:
367     case Hexagon::S2_pstorerif_io:
368     case Hexagon::S2_pstorerdt_io:
369     case Hexagon::S2_pstorerdf_io: {
370       const MachineOperand &OpFI = MI.getOperand(1);
371       if (!OpFI.isFI())
372         return 0;
373       const MachineOperand &OpOff = MI.getOperand(2);
374       if (!OpOff.isImm() || OpOff.getImm() != 0)
375         return 0;
376       FrameIndex = OpFI.getIndex();
377       return MI.getOperand(3).getReg();
378     }
379   }
380 
381   return 0;
382 }
383 
384 /// This function checks if the instruction or bundle of instructions
385 /// has a load from a stack slot, and if so returns the frame index and
386 /// machine memory operand of that instruction.
387 bool HexagonInstrInfo::hasLoadFromStackSlot(
388     const MachineInstr &MI,
389     SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
390   if (MI.isBundle()) {
391     const MachineBasicBlock *MBB = MI.getParent();
392     MachineBasicBlock::const_instr_iterator MII = MI.getIterator();
393     for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
394       if (TargetInstrInfo::hasLoadFromStackSlot(*MII, Accesses))
395         return true;
396     return false;
397   }
398 
399   return TargetInstrInfo::hasLoadFromStackSlot(MI, Accesses);
400 }
401 
402 /// This function checks if the instruction or bundle of instructions
403 /// has a store to a stack slot, and if so returns the frame index and
404 /// machine memory operand of that instruction.
405 bool HexagonInstrInfo::hasStoreToStackSlot(
406     const MachineInstr &MI,
407     SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
408   if (MI.isBundle()) {
409     const MachineBasicBlock *MBB = MI.getParent();
410     MachineBasicBlock::const_instr_iterator MII = MI.getIterator();
411     for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
412       if (TargetInstrInfo::hasStoreToStackSlot(*MII, Accesses))
413         return true;
414     return false;
415   }
416 
417   return TargetInstrInfo::hasStoreToStackSlot(MI, Accesses);
418 }
419 
420 /// This function can analyze one- and two-way branching only and should
421 /// (mostly) be called by the target-independent side.
422 /// The first entry is always the opcode of the branching instruction, except
423 /// when the Cond vector is supposed to be empty, e.g. when analyzeBranch
424 /// fails or the BB ends in only an unconditional jump. Subsequent entries
425 /// depend upon the opcode, e.g. a conditional jump "Jump_c p" will have
426 /// Cond[0] = Jump_c
427 /// Cond[1] = p
428 /// HW-loop ENDLOOP:
429 /// Cond[0] = ENDLOOP
430 /// Cond[1] = MBB
431 /// New value jump:
432 /// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
433 /// Cond[1] = R
434 /// Cond[2] = Imm
435 bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
436                                      MachineBasicBlock *&TBB,
437                                      MachineBasicBlock *&FBB,
438                                      SmallVectorImpl<MachineOperand> &Cond,
439                                      bool AllowModify) const {
440   TBB = nullptr;
441   FBB = nullptr;
442   Cond.clear();
443 
444   // If the block has no terminators, it just falls into the block after it.
445   MachineBasicBlock::instr_iterator I = MBB.instr_end();
446   if (I == MBB.instr_begin())
447     return false;
448 
449   // A basic block may look like this:
450   //
451   //  [   insn
452   //     EH_LABEL
453   //      insn
454   //      insn
455   //      insn
456   //     EH_LABEL
457   //      insn     ]
458   //
459   // It has two successors but does not end with a terminator.
460   // We don't know how to handle such a block.
461   do {
462     --I;
463     if (I->isEHLabel())
464       // Don't analyze EH branches.
465       return true;
466   } while (I != MBB.instr_begin());
467 
468   I = MBB.instr_end();
469   --I;
470 
471   while (I->isDebugInstr()) {
472     if (I == MBB.instr_begin())
473       return false;
474     --I;
475   }
476 
477   bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
478                      I->getOperand(0).isMBB();
479   // Delete the J2_jump if it's equivalent to a fall-through.
480   if (AllowModify && JumpToBlock &&
481       MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
482     LLVM_DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
483     I->eraseFromParent();
484     I = MBB.instr_end();
485     if (I == MBB.instr_begin())
486       return false;
487     --I;
488   }
489   if (!isUnpredicatedTerminator(*I))
490     return false;
491 
492   // Get the last instruction in the block.
493   MachineInstr *LastInst = &*I;
494   MachineInstr *SecondLastInst = nullptr;
495   // Find one more terminator if present.
496   while (true) {
497     if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
498       if (!SecondLastInst)
499         SecondLastInst = &*I;
500       else
501         // This is a third branch.
502         return true;
503     }
504     if (I == MBB.instr_begin())
505       break;
506     --I;
507   }
508 
509   int LastOpcode = LastInst->getOpcode();
510   int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
511   // If the branch target is not a basic block, it could be a tail call.
512   // (It is, if the target is a function.)
513   if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
514     return true;
515   if (SecLastOpcode == Hexagon::J2_jump &&
516       !SecondLastInst->getOperand(0).isMBB())
517     return true;
518 
519   bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
520   bool LastOpcodeHasNVJump = isNewValueJump(*LastInst);
521 
522   if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
523     return true;
524 
525   // If there is only one terminator instruction, process it.
526   if (LastInst && !SecondLastInst) {
527     if (LastOpcode == Hexagon::J2_jump) {
528       TBB = LastInst->getOperand(0).getMBB();
529       return false;
530     }
531     if (isEndLoopN(LastOpcode)) {
532       TBB = LastInst->getOperand(0).getMBB();
533       Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
534       Cond.push_back(LastInst->getOperand(0));
535       return false;
536     }
537     if (LastOpcodeHasJMP_c) {
538       TBB = LastInst->getOperand(1).getMBB();
539       Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
540       Cond.push_back(LastInst->getOperand(0));
541       return false;
542     }
543     // Only supporting rr/ri versions of new-value jumps.
544     if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
545       TBB = LastInst->getOperand(2).getMBB();
546       Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
547       Cond.push_back(LastInst->getOperand(0));
548       Cond.push_back(LastInst->getOperand(1));
549       return false;
550     }
551     LLVM_DEBUG(dbgs() << "\nCan't analyze " << printMBBReference(MBB)
552                       << " with one jump\n";);
553     // Otherwise, don't know what this is.
554     return true;
555   }
556 
557   bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
558   bool SecLastOpcodeHasNVJump = isNewValueJump(*SecondLastInst);
559   if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
560     if (!SecondLastInst->getOperand(1).isMBB())
561       return true;
562     TBB =  SecondLastInst->getOperand(1).getMBB();
563     Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
564     Cond.push_back(SecondLastInst->getOperand(0));
565     FBB = LastInst->getOperand(0).getMBB();
566     return false;
567   }
568 
569   // Only supporting rr/ri versions of new-value jumps.
570   if (SecLastOpcodeHasNVJump &&
571       (SecondLastInst->getNumExplicitOperands() == 3) &&
572       (LastOpcode == Hexagon::J2_jump)) {
573     TBB = SecondLastInst->getOperand(2).getMBB();
574     Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
575     Cond.push_back(SecondLastInst->getOperand(0));
576     Cond.push_back(SecondLastInst->getOperand(1));
577     FBB = LastInst->getOperand(0).getMBB();
578     return false;
579   }
580 
581   // If the block ends with two Hexagon J2_jump instructions, handle it.  The
582   // second one is never executed, so remove it if we may modify the block.
583   if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
584     TBB = SecondLastInst->getOperand(0).getMBB();
585     I = LastInst->getIterator();
586     if (AllowModify)
587       I->eraseFromParent();
588     return false;
589   }
590 
591   // If the block ends with an ENDLOOP followed by a J2_jump, handle it.
592   if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
593     TBB = SecondLastInst->getOperand(0).getMBB();
594     Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
595     Cond.push_back(SecondLastInst->getOperand(0));
596     FBB = LastInst->getOperand(0).getMBB();
597     return false;
598   }
599   LLVM_DEBUG(dbgs() << "\nCan't analyze " << printMBBReference(MBB)
600                     << " with two jumps";);
601   // Otherwise, can't handle this.
602   return true;
603 }
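
// Typical round trip (sketch of how the generic branch-folding code drives
// this hook; variable names here are illustrative):
//   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
//   SmallVector<MachineOperand, 4> Cond;
//   if (!TII.analyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false)) {
//     TII.removeBranch(MBB);
//     TII.insertBranch(MBB, TBB, FBB, Cond, DL); // Cond must round-trip here.
//   }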
604 
605 unsigned HexagonInstrInfo::removeBranch(MachineBasicBlock &MBB,
606                                         int *BytesRemoved) const {
607   assert(!BytesRemoved && "code size not handled");
608 
609   LLVM_DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB));
610   MachineBasicBlock::iterator I = MBB.end();
611   unsigned Count = 0;
612   while (I != MBB.begin()) {
613     --I;
614     if (I->isDebugInstr())
615       continue;
616     // Only removing branches from end of MBB.
617     if (!I->isBranch())
618       return Count;
619     if (Count && (I->getOpcode() == Hexagon::J2_jump))
620       llvm_unreachable("Malformed basic block: unconditional branch not last");
621     MBB.erase(&MBB.back());
622     I = MBB.end();
623     ++Count;
624   }
625   return Count;
626 }
627 
628 unsigned HexagonInstrInfo::insertBranch(MachineBasicBlock &MBB,
629                                         MachineBasicBlock *TBB,
630                                         MachineBasicBlock *FBB,
631                                         ArrayRef<MachineOperand> Cond,
632                                         const DebugLoc &DL,
633                                         int *BytesAdded) const {
634   unsigned BOpc   = Hexagon::J2_jump;
635   unsigned BccOpc = Hexagon::J2_jumpt;
636   assert(validateBranchCond(Cond) && "Invalid branching condition");
637   assert(TBB && "insertBranch must not be told to insert a fallthrough");
638   assert(!BytesAdded && "code size not handled");
639 
640   // Check if reverseBranchCondition has asked to reverse this branch
641   // If we want to reverse the branch an odd number of times, we want
642   // J2_jumpf.
643   if (!Cond.empty() && Cond[0].isImm())
644     BccOpc = Cond[0].getImm();
645 
646   if (!FBB) {
647     if (Cond.empty()) {
648       // Due to a bug in TailMerging/CFG Optimization, we need special-case
649       // handling of a predicated jump followed by an unconditional jump.
650       // Otherwise, Tail Merging and CFG Optimization go
651       // into an infinite loop.
652       MachineBasicBlock *NewTBB, *NewFBB;
653       SmallVector<MachineOperand, 4> Cond;
654       auto Term = MBB.getFirstTerminator();
655       if (Term != MBB.end() && isPredicated(*Term) &&
656           !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false) &&
657           MachineFunction::iterator(NewTBB) == ++MBB.getIterator()) {
658         reverseBranchCondition(Cond);
659         removeBranch(MBB);
660         return insertBranch(MBB, TBB, nullptr, Cond, DL);
661       }
662       BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
663     } else if (isEndLoopN(Cond[0].getImm())) {
664       int EndLoopOp = Cond[0].getImm();
665       assert(Cond[1].isMBB());
666       // Since we're adding an ENDLOOP, there better be a LOOP instruction.
667       // Check for it, and change the BB target if needed.
668       SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
669       MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
670                                          VisitedBBs);
671       assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
672       Loop->getOperand(0).setMBB(TBB);
673       // Add the ENDLOOP after finding the LOOP instruction.
674       BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
675     } else if (isNewValueJump(Cond[0].getImm())) {
676       assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
677       // New value jump
678       // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
679       // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
680       unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
681       LLVM_DEBUG(dbgs() << "\nInserting NVJump for "
682                         << printMBBReference(MBB););
683       if (Cond[2].isReg()) {
684         unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
685         BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
686           addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
687       } else if (Cond[2].isImm()) {
688         BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
689           addImm(Cond[2].getImm()).addMBB(TBB);
690       } else
691         llvm_unreachable("Invalid condition for branching");
692     } else {
693       assert((Cond.size() == 2) && "Malformed cond vector");
694       const MachineOperand &RO = Cond[1];
695       unsigned Flags = getUndefRegState(RO.isUndef());
696       BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
697     }
698     return 1;
699   }
700   assert((!Cond.empty()) &&
701          "Cond. cannot be empty when multiple branchings are required");
702   assert((!isNewValueJump(Cond[0].getImm())) &&
703          "NV-jump cannot be inserted with another branch");
704   // Special case for hardware loops.  The condition is a basic block.
705   if (isEndLoopN(Cond[0].getImm())) {
706     int EndLoopOp = Cond[0].getImm();
707     assert(Cond[1].isMBB());
708     // Since we're adding an ENDLOOP, there better be a LOOP instruction.
709     // Check for it, and change the BB target if needed.
710     SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
711     MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
712                                        VisitedBBs);
713     assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
714     Loop->getOperand(0).setMBB(TBB);
715     // Add the ENDLOOP after finding the LOOP instruction.
716     BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
717   } else {
718     const MachineOperand &RO = Cond[1];
719     unsigned Flags = getUndefRegState(RO.isUndef());
720     BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
721   }
722   BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
723 
724   return 2;
725 }
726 
727 namespace {
728 class HexagonPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
729   MachineInstr *Loop, *EndLoop;
730   MachineFunction *MF;
731   const HexagonInstrInfo *TII;
732   int64_t TripCount;
733   Register LoopCount;
734   DebugLoc DL;
735 
736 public:
737   HexagonPipelinerLoopInfo(MachineInstr *Loop, MachineInstr *EndLoop)
738       : Loop(Loop), EndLoop(EndLoop), MF(Loop->getParent()->getParent()),
739         TII(MF->getSubtarget<HexagonSubtarget>().getInstrInfo()),
740         DL(Loop->getDebugLoc()) {
741     // Inspect the Loop instruction up-front, as it may be deleted when we call
742     // createTripCountGreaterCondition.
743     TripCount = Loop->getOpcode() == Hexagon::J2_loop0r
744                     ? -1
745                     : Loop->getOperand(1).getImm();
746     if (TripCount == -1)
747       LoopCount = Loop->getOperand(1).getReg();
748   }
749 
750   bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
751     // Only ignore the terminator.
752     return MI == EndLoop;
753   }
754 
755   std::optional<bool> createTripCountGreaterCondition(
756       int TC, MachineBasicBlock &MBB,
757       SmallVectorImpl<MachineOperand> &Cond) override {
758     if (TripCount == -1) {
759       // Check if we're done with the loop.
760       Register Done = TII->createVR(MF, MVT::i1);
761       MachineInstr *NewCmp = BuildMI(&MBB, DL,
762                                      TII->get(Hexagon::C2_cmpgtui), Done)
763                                  .addReg(LoopCount)
764                                  .addImm(TC);
765       Cond.push_back(MachineOperand::CreateImm(Hexagon::J2_jumpf));
766       Cond.push_back(NewCmp->getOperand(0));
767       return {};
768     }
769 
770     return TripCount > TC;
771   }
772 
773   void setPreheader(MachineBasicBlock *NewPreheader) override {
774     NewPreheader->splice(NewPreheader->getFirstTerminator(), Loop->getParent(),
775                          Loop);
776   }
777 
778   void adjustTripCount(int TripCountAdjust) override {
779     // If the loop trip count is a compile-time value, then just change the
780     // value.
781     if (Loop->getOpcode() == Hexagon::J2_loop0i ||
782         Loop->getOpcode() == Hexagon::J2_loop1i) {
783       int64_t TripCount = Loop->getOperand(1).getImm() + TripCountAdjust;
784       assert(TripCount > 0 && "Can't create an empty or negative loop!");
785       Loop->getOperand(1).setImm(TripCount);
786       return;
787     }
788 
789     // The loop trip count is a run-time value. We generate code to subtract
790     // one from the trip count, and update the loop instruction.
791     Register LoopCount = Loop->getOperand(1).getReg();
792     Register NewLoopCount = TII->createVR(MF, MVT::i32);
793     BuildMI(*Loop->getParent(), Loop, Loop->getDebugLoc(),
794             TII->get(Hexagon::A2_addi), NewLoopCount)
795         .addReg(LoopCount)
796         .addImm(TripCountAdjust);
797     Loop->getOperand(1).setReg(NewLoopCount);
798   }
799 
800   void disposed() override { Loop->eraseFromParent(); }
801 };
802 } // namespace
803 
804 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
805 HexagonInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
806   // We really "analyze" only hardware loops right now.
807   MachineBasicBlock::iterator I = LoopBB->getFirstTerminator();
808 
809   if (I != LoopBB->end() && isEndLoopN(I->getOpcode())) {
810     SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
811     MachineInstr *LoopInst = findLoopInstr(
812         LoopBB, I->getOpcode(), I->getOperand(0).getMBB(), VisitedBBs);
813     if (LoopInst)
814       return std::make_unique<HexagonPipelinerLoopInfo>(LoopInst, &*I);
815   }
816   return nullptr;
817 }
818 
819 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
820       unsigned NumCycles, unsigned ExtraPredCycles,
821       BranchProbability Probability) const {
822   return nonDbgBBSize(&MBB) <= 3;
823 }
824 
825 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
826       unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
827       unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
828       const {
829   return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
830 }
831 
832 bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
833       unsigned NumInstrs, BranchProbability Probability) const {
834   return NumInstrs <= 4;
835 }
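
// Net effect of these heuristics: if-conversion is considered profitable only
// for small blocks, at most three non-debug instructions per side, or four
// instructions when the block has to be duplicated.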
836 
837 static void getLiveInRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
838   SmallVector<std::pair<MCPhysReg, const MachineOperand*>,2> Clobbers;
839   const MachineBasicBlock &B = *MI.getParent();
840   Regs.addLiveIns(B);
841   auto E = MachineBasicBlock::const_iterator(MI.getIterator());
842   for (auto I = B.begin(); I != E; ++I) {
843     Clobbers.clear();
844     Regs.stepForward(*I, Clobbers);
845   }
846 }
847 
848 static void getLiveOutRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
849   const MachineBasicBlock &B = *MI.getParent();
850   Regs.addLiveOuts(B);
851   auto E = ++MachineBasicBlock::const_iterator(MI.getIterator()).getReverse();
852   for (auto I = B.rbegin(); I != E; ++I)
853     Regs.stepBackward(*I);
854 }
855 
856 void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
857                                    MachineBasicBlock::iterator I,
858                                    const DebugLoc &DL, MCRegister DestReg,
859                                    MCRegister SrcReg, bool KillSrc) const {
860   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
861   unsigned KillFlag = getKillRegState(KillSrc);
862 
863   if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
864     BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
865       .addReg(SrcReg, KillFlag);
866     return;
867   }
868   if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
869     BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
870       .addReg(SrcReg, KillFlag);
871     return;
872   }
873   if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
874     // Map Pd = Ps to Pd = or(Ps, Ps).
875     BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
876       .addReg(SrcReg).addReg(SrcReg, KillFlag);
877     return;
878   }
879   if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
880       Hexagon::IntRegsRegClass.contains(SrcReg)) {
881     BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
882       .addReg(SrcReg, KillFlag);
883     return;
884   }
885   if (Hexagon::IntRegsRegClass.contains(DestReg) &&
886       Hexagon::CtrRegsRegClass.contains(SrcReg)) {
887     BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
888       .addReg(SrcReg, KillFlag);
889     return;
890   }
891   if (Hexagon::ModRegsRegClass.contains(DestReg) &&
892       Hexagon::IntRegsRegClass.contains(SrcReg)) {
893     BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
894       .addReg(SrcReg, KillFlag);
895     return;
896   }
897   if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
898       Hexagon::IntRegsRegClass.contains(DestReg)) {
899     BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
900       .addReg(SrcReg, KillFlag);
901     return;
902   }
903   if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
904       Hexagon::PredRegsRegClass.contains(DestReg)) {
905     BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
906       .addReg(SrcReg, KillFlag);
907     return;
908   }
909   if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
910       Hexagon::IntRegsRegClass.contains(DestReg)) {
911     BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
912       .addReg(SrcReg, KillFlag);
913     return;
914   }
915   if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
916     BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
917       addReg(SrcReg, KillFlag);
918     return;
919   }
920   if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
921     LivePhysRegs LiveAtMI(HRI);
922     getLiveInRegsAt(LiveAtMI, *I);
923     Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
924     Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
925     unsigned UndefLo = getUndefRegState(!LiveAtMI.contains(SrcLo));
926     unsigned UndefHi = getUndefRegState(!LiveAtMI.contains(SrcHi));
927     BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
928       .addReg(SrcHi, KillFlag | UndefHi)
929       .addReg(SrcLo, KillFlag | UndefLo);
930     return;
931   }
932   if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
933     BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
934       .addReg(SrcReg)
935       .addReg(SrcReg, KillFlag);
936     return;
937   }
938   if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
939       Hexagon::HvxVRRegClass.contains(DestReg)) {
940     llvm_unreachable("Unimplemented pred to vec");
941     return;
942   }
943   if (Hexagon::HvxQRRegClass.contains(DestReg) &&
944       Hexagon::HvxVRRegClass.contains(SrcReg)) {
945     llvm_unreachable("Unimplemented vec to pred");
946     return;
947   }
948 
949 #ifndef NDEBUG
950   // Show the invalid registers to ease debugging.
951   dbgs() << "Invalid registers for copy in " << printMBBReference(MBB) << ": "
952          << printReg(DestReg, &HRI) << " = " << printReg(SrcReg, &HRI) << '\n';
953 #endif
954   llvm_unreachable("Unimplemented");
955 }
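
// Illustrative expansions: a predicate-to-predicate copy is emitted as
// "Pd = or(Ps, Ps)" (C2_or), and a copy of an HVX register pair becomes a
// single V6_vcombine of the source's hi and lo vector sub-registers.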
956 
957 void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
958                                            MachineBasicBlock::iterator I,
959                                            Register SrcReg, bool isKill, int FI,
960                                            const TargetRegisterClass *RC,
961                                            const TargetRegisterInfo *TRI,
962                                            Register VReg) const {
963   DebugLoc DL = MBB.findDebugLoc(I);
964   MachineFunction &MF = *MBB.getParent();
965   MachineFrameInfo &MFI = MF.getFrameInfo();
966   unsigned KillFlag = getKillRegState(isKill);
967 
968   MachineMemOperand *MMO = MF.getMachineMemOperand(
969       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
970       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
971 
972   if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
973     BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
974       .addFrameIndex(FI).addImm(0)
975       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
976   } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
977     BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
978       .addFrameIndex(FI).addImm(0)
979       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
980   } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
981     BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
982       .addFrameIndex(FI).addImm(0)
983       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
984   } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
985     BuildMI(MBB, I, DL, get(Hexagon::STriw_ctr))
986       .addFrameIndex(FI).addImm(0)
987       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
988   } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
989     BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
990       .addFrameIndex(FI).addImm(0)
991       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
992   } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
993     BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerv_ai))
994       .addFrameIndex(FI).addImm(0)
995       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
996   } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
997     BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerw_ai))
998       .addFrameIndex(FI).addImm(0)
999       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
1000   } else {
1001     llvm_unreachable("Unimplemented");
1002   }
1003 }
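
// Illustrative spill (schematic MIR): spilling a 32-bit GPR $r16 into slot
// %stack.0 emits "S2_storeri_io %stack.0, 0, killed $r16", with the slot's
// size and alignment captured in the attached MachineMemOperand.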
1004 
1005 void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
1006                                             MachineBasicBlock::iterator I,
1007                                             Register DestReg, int FI,
1008                                             const TargetRegisterClass *RC,
1009                                             const TargetRegisterInfo *TRI,
1010                                             Register VReg) const {
1011   DebugLoc DL = MBB.findDebugLoc(I);
1012   MachineFunction &MF = *MBB.getParent();
1013   MachineFrameInfo &MFI = MF.getFrameInfo();
1014 
1015   MachineMemOperand *MMO = MF.getMachineMemOperand(
1016       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
1017       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
1018 
1019   if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
1020     BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
1021       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1022   } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
1023     BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
1024       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1025   } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
1026     BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
1027       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1028   } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
1029     BuildMI(MBB, I, DL, get(Hexagon::LDriw_ctr), DestReg)
1030       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1031   } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
1032     BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
1033       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1034   } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
1035     BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrv_ai), DestReg)
1036       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1037   } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
1038     BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrw_ai), DestReg)
1039       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1040   } else {
1041     llvm_unreachable("Can't load this register from stack slot");
1042   }
1043 }
1044 
1045 /// expandPostRAPseudo - This function is called for all pseudo instructions
1046 /// that remain after register allocation. Many pseudo instructions are
1047 /// created to help register allocation. This is the place to convert them
1048 /// into real instructions. The target can edit MI in place, or it can insert
1049 /// new instructions and erase MI. The function should return true if
1050 /// anything was changed.
1051 bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1052   MachineBasicBlock &MBB = *MI.getParent();
1053   MachineFunction &MF = *MBB.getParent();
1054   MachineRegisterInfo &MRI = MF.getRegInfo();
1055   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1056   LivePhysRegs LiveIn(HRI), LiveOut(HRI);
1057   DebugLoc DL = MI.getDebugLoc();
1058   unsigned Opc = MI.getOpcode();
1059 
1060   auto RealCirc = [&](unsigned Opc, bool HasImm, unsigned MxOp) {
1061     Register Mx = MI.getOperand(MxOp).getReg();
1062     Register CSx = (Mx == Hexagon::M0 ? Hexagon::CS0 : Hexagon::CS1);
1063     BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrrcr), CSx)
1064         .add(MI.getOperand((HasImm ? 5 : 4)));
1065     auto MIB = BuildMI(MBB, MI, DL, get(Opc)).add(MI.getOperand(0))
1066         .add(MI.getOperand(1)).add(MI.getOperand(2)).add(MI.getOperand(3));
1067     if (HasImm)
1068       MIB.add(MI.getOperand(4));
1069     MIB.addReg(CSx, RegState::Implicit);
1070     MBB.erase(MI);
1071     return true;
1072   };
1073 
1074   auto UseAligned = [&](const MachineInstr &MI, Align NeedAlign) {
1075     if (MI.memoperands().empty())
1076       return false;
1077     return all_of(MI.memoperands(), [NeedAlign](const MachineMemOperand *MMO) {
1078       return MMO->getAlign() >= NeedAlign;
1079     });
1080   };
1081 
1082   switch (Opc) {
1083     case Hexagon::PS_call_instrprof_custom: {
1084       auto Op0 = MI.getOperand(0);
1085       assert(Op0.isGlobal() &&
1086              "First operand must be a global containing handler name.");
1087       const GlobalValue *NameVar = Op0.getGlobal();
1088       const GlobalVariable *GV = dyn_cast<GlobalVariable>(NameVar);
1089       auto *Arr = cast<ConstantDataArray>(GV->getInitializer());
1090       StringRef NameStr = Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
1091 
1092       MachineOperand &Op1 = MI.getOperand(1);
1093       // Set R0 with the imm value to be passed to the custom profiling handler.
1094       BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrsi), Hexagon::R0)
1095         .addImm(Op1.getImm());
1096       // The call to the custom handler is being treated as a special one as the
1097       // callee is responsible for saving and restoring all the registers
1098       // (including caller saved registers) it needs to modify. This is
1099       // done to reduce the impact of instrumentation on the code being
1100       // instrumented/profiled.
1101       // NOTE: R14, R15 and R28 are reserved for PLT handling. These registers
1102       // are in the Def list of the Hexagon::PS_call_instrprof_custom and
1103       // therefore will be handled appropriately during register allocation.
1104 
1105       // TODO: It may be a good idea to add a separate pseudo instruction for
1106       // static relocation which doesn't need to reserve r14, r15 and r28.
1107 
1108       auto MIB = BuildMI(MBB, MI, DL, get(Hexagon::J2_call))
1109                  .addUse(Hexagon::R0, RegState::Implicit|RegState::InternalRead)
1110                  .addDef(Hexagon::R29, RegState::ImplicitDefine)
1111                  .addDef(Hexagon::R30, RegState::ImplicitDefine)
1112                  .addDef(Hexagon::R14, RegState::ImplicitDefine)
1113                  .addDef(Hexagon::R15, RegState::ImplicitDefine)
1114                  .addDef(Hexagon::R28, RegState::ImplicitDefine);
1115       const char *cstr = MF.createExternalSymbolName(NameStr);
1116       MIB.addExternalSymbol(cstr);
1117       MBB.erase(MI);
1118       return true;
1119     }
1120     case TargetOpcode::COPY: {
1121       MachineOperand &MD = MI.getOperand(0);
1122       MachineOperand &MS = MI.getOperand(1);
1123       MachineBasicBlock::iterator MBBI = MI.getIterator();
1124       if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
1125         copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
1126         std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
1127       }
1128       MBB.erase(MBBI);
1129       return true;
1130     }
1131     case Hexagon::PS_aligna:
1132       BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
1133           .addReg(HRI.getFrameRegister())
1134           .addImm(-MI.getOperand(1).getImm());
1135       MBB.erase(MI);
1136       return true;
1137     case Hexagon::V6_vassignp: {
1138       Register SrcReg = MI.getOperand(1).getReg();
1139       Register DstReg = MI.getOperand(0).getReg();
1140       Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1141       Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1142       getLiveInRegsAt(LiveIn, MI);
1143       unsigned UndefLo = getUndefRegState(!LiveIn.contains(SrcLo));
1144       unsigned UndefHi = getUndefRegState(!LiveIn.contains(SrcHi));
1145       unsigned Kill = getKillRegState(MI.getOperand(1).isKill());
1146       BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg)
1147           .addReg(SrcHi, UndefHi)
1148           .addReg(SrcLo, Kill | UndefLo);
1149       MBB.erase(MI);
1150       return true;
1151     }
1152     case Hexagon::V6_lo: {
1153       Register SrcReg = MI.getOperand(1).getReg();
1154       Register DstReg = MI.getOperand(0).getReg();
1155       Register SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1156       copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
1157       MBB.erase(MI);
1158       MRI.clearKillFlags(SrcSubLo);
1159       return true;
1160     }
1161     case Hexagon::V6_hi: {
1162       Register SrcReg = MI.getOperand(1).getReg();
1163       Register DstReg = MI.getOperand(0).getReg();
1164       Register SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1165       copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
1166       MBB.erase(MI);
1167       MRI.clearKillFlags(SrcSubHi);
1168       return true;
1169     }
1170     case Hexagon::PS_vloadrv_ai: {
1171       Register DstReg = MI.getOperand(0).getReg();
1172       const MachineOperand &BaseOp = MI.getOperand(1);
1173       assert(BaseOp.getSubReg() == 0);
1174       int Offset = MI.getOperand(2).getImm();
1175       Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1176       unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1177                                                   : Hexagon::V6_vL32Ub_ai;
1178       BuildMI(MBB, MI, DL, get(NewOpc), DstReg)
1179           .addReg(BaseOp.getReg(), getRegState(BaseOp))
1180           .addImm(Offset)
1181           .cloneMemRefs(MI);
1182       MBB.erase(MI);
1183       return true;
1184     }
1185     case Hexagon::PS_vloadrw_ai: {
1186       Register DstReg = MI.getOperand(0).getReg();
1187       const MachineOperand &BaseOp = MI.getOperand(1);
1188       assert(BaseOp.getSubReg() == 0);
1189       int Offset = MI.getOperand(2).getImm();
1190       unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1191       Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1192       unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1193                                                   : Hexagon::V6_vL32Ub_ai;
1194       BuildMI(MBB, MI, DL, get(NewOpc),
1195               HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1196           .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill)
1197           .addImm(Offset)
1198           .cloneMemRefs(MI);
1199       BuildMI(MBB, MI, DL, get(NewOpc),
1200               HRI.getSubReg(DstReg, Hexagon::vsub_hi))
1201           .addReg(BaseOp.getReg(), getRegState(BaseOp))
1202           .addImm(Offset + VecOffset)
1203           .cloneMemRefs(MI);
1204       MBB.erase(MI);
1205       return true;
1206     }
1207     case Hexagon::PS_vstorerv_ai: {
1208       const MachineOperand &SrcOp = MI.getOperand(2);
1209       assert(SrcOp.getSubReg() == 0);
1210       const MachineOperand &BaseOp = MI.getOperand(0);
1211       assert(BaseOp.getSubReg() == 0);
1212       int Offset = MI.getOperand(1).getImm();
1213       Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1214       unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1215                                                   : Hexagon::V6_vS32Ub_ai;
1216       BuildMI(MBB, MI, DL, get(NewOpc))
1217           .addReg(BaseOp.getReg(), getRegState(BaseOp))
1218           .addImm(Offset)
1219           .addReg(SrcOp.getReg(), getRegState(SrcOp))
1220           .cloneMemRefs(MI);
1221       MBB.erase(MI);
1222       return true;
1223     }
1224     case Hexagon::PS_vstorerw_ai: {
1225       Register SrcReg = MI.getOperand(2).getReg();
1226       const MachineOperand &BaseOp = MI.getOperand(0);
1227       assert(BaseOp.getSubReg() == 0);
1228       int Offset = MI.getOperand(1).getImm();
1229       unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1230       Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1231       unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1232                                                   : Hexagon::V6_vS32Ub_ai;
1233       BuildMI(MBB, MI, DL, get(NewOpc))
1234           .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill)
1235           .addImm(Offset)
1236           .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo))
1237           .cloneMemRefs(MI);
1238       BuildMI(MBB, MI, DL, get(NewOpc))
1239           .addReg(BaseOp.getReg(), getRegState(BaseOp))
1240           .addImm(Offset + VecOffset)
1241           .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi))
1242           .cloneMemRefs(MI);
1243       MBB.erase(MI);
1244       return true;
1245     }
1246     case Hexagon::PS_true: {
1247       Register Reg = MI.getOperand(0).getReg();
1248       BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
1249         .addReg(Reg, RegState::Undef)
1250         .addReg(Reg, RegState::Undef);
1251       MBB.erase(MI);
1252       return true;
1253     }
1254     case Hexagon::PS_false: {
1255       Register Reg = MI.getOperand(0).getReg();
1256       BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
1257         .addReg(Reg, RegState::Undef)
1258         .addReg(Reg, RegState::Undef);
1259       MBB.erase(MI);
1260       return true;
1261     }
1262     case Hexagon::PS_qtrue: {
1263       BuildMI(MBB, MI, DL, get(Hexagon::V6_veqw), MI.getOperand(0).getReg())
1264         .addReg(Hexagon::V0, RegState::Undef)
1265         .addReg(Hexagon::V0, RegState::Undef);
1266       MBB.erase(MI);
1267       return true;
1268     }
1269     case Hexagon::PS_qfalse: {
1270       BuildMI(MBB, MI, DL, get(Hexagon::V6_vgtw), MI.getOperand(0).getReg())
1271         .addReg(Hexagon::V0, RegState::Undef)
1272         .addReg(Hexagon::V0, RegState::Undef);
1273       MBB.erase(MI);
1274       return true;
1275     }
1276     case Hexagon::PS_vdd0: {
1277       Register Vd = MI.getOperand(0).getReg();
1278       BuildMI(MBB, MI, DL, get(Hexagon::V6_vsubw_dv), Vd)
1279         .addReg(Vd, RegState::Undef)
1280         .addReg(Vd, RegState::Undef);
1281       MBB.erase(MI);
1282       return true;
1283     }
1284     case Hexagon::PS_vmulw: {
1285       // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
1286       Register DstReg = MI.getOperand(0).getReg();
1287       Register Src1Reg = MI.getOperand(1).getReg();
1288       Register Src2Reg = MI.getOperand(2).getReg();
1289       Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1290       Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1291       Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1292       Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1293       BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1294               HRI.getSubReg(DstReg, Hexagon::isub_hi))
1295           .addReg(Src1SubHi)
1296           .addReg(Src2SubHi);
1297       BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1298               HRI.getSubReg(DstReg, Hexagon::isub_lo))
1299           .addReg(Src1SubLo)
1300           .addReg(Src2SubLo);
1301       MBB.erase(MI);
1302       MRI.clearKillFlags(Src1SubHi);
1303       MRI.clearKillFlags(Src1SubLo);
1304       MRI.clearKillFlags(Src2SubHi);
1305       MRI.clearKillFlags(Src2SubLo);
1306       return true;
1307     }
1308     case Hexagon::PS_vmulw_acc: {
1309       // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
1310       Register DstReg = MI.getOperand(0).getReg();
1311       Register Src1Reg = MI.getOperand(1).getReg();
1312       Register Src2Reg = MI.getOperand(2).getReg();
1313       Register Src3Reg = MI.getOperand(3).getReg();
1314       Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1315       Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1316       Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1317       Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1318       Register Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
1319       Register Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);
1320       BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1321               HRI.getSubReg(DstReg, Hexagon::isub_hi))
1322           .addReg(Src1SubHi)
1323           .addReg(Src2SubHi)
1324           .addReg(Src3SubHi);
1325       BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1326               HRI.getSubReg(DstReg, Hexagon::isub_lo))
1327           .addReg(Src1SubLo)
1328           .addReg(Src2SubLo)
1329           .addReg(Src3SubLo);
1330       MBB.erase(MI);
1331       MRI.clearKillFlags(Src1SubHi);
1332       MRI.clearKillFlags(Src1SubLo);
1333       MRI.clearKillFlags(Src2SubHi);
1334       MRI.clearKillFlags(Src2SubLo);
1335       MRI.clearKillFlags(Src3SubHi);
1336       MRI.clearKillFlags(Src3SubLo);
1337       return true;
1338     }
1339     case Hexagon::PS_pselect: {
1340       const MachineOperand &Op0 = MI.getOperand(0);
1341       const MachineOperand &Op1 = MI.getOperand(1);
1342       const MachineOperand &Op2 = MI.getOperand(2);
1343       const MachineOperand &Op3 = MI.getOperand(3);
1344       Register Rd = Op0.getReg();
1345       Register Pu = Op1.getReg();
1346       Register Rs = Op2.getReg();
1347       Register Rt = Op3.getReg();
1348       DebugLoc DL = MI.getDebugLoc();
1349       unsigned K1 = getKillRegState(Op1.isKill());
1350       unsigned K2 = getKillRegState(Op2.isKill());
1351       unsigned K3 = getKillRegState(Op3.isKill());
1352       if (Rd != Rs)
1353         BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
1354           .addReg(Pu, (Rd == Rt) ? K1 : 0)
1355           .addReg(Rs, K2);
1356       if (Rd != Rt)
1357         BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
1358           .addReg(Pu, K1)
1359           .addReg(Rt, K3);
1360       MBB.erase(MI);
1361       return true;
1362     }
1363     case Hexagon::PS_vselect: {
1364       const MachineOperand &Op0 = MI.getOperand(0);
1365       const MachineOperand &Op1 = MI.getOperand(1);
1366       const MachineOperand &Op2 = MI.getOperand(2);
1367       const MachineOperand &Op3 = MI.getOperand(3);
1368       getLiveOutRegsAt(LiveOut, MI);
1369       bool IsDestLive = !LiveOut.available(MRI, Op0.getReg());
1370       Register PReg = Op1.getReg();
1371       assert(Op1.getSubReg() == 0);
1372       unsigned PState = getRegState(Op1);
1373 
1374       if (Op0.getReg() != Op2.getReg()) {
1375         unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1376                                                   : PState;
1377         auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
1378                      .add(Op0)
1379                      .addReg(PReg, S)
1380                      .add(Op2);
1381         if (IsDestLive)
1382           T.addReg(Op0.getReg(), RegState::Implicit);
1383         IsDestLive = true;
1384       }
1385       if (Op0.getReg() != Op3.getReg()) {
1386         auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
1387                      .add(Op0)
1388                      .addReg(PReg, PState)
1389                      .add(Op3);
1390         if (IsDestLive)
1391           T.addReg(Op0.getReg(), RegState::Implicit);
1392       }
1393       MBB.erase(MI);
1394       return true;
1395     }
1396     case Hexagon::PS_wselect: {
1397       MachineOperand &Op0 = MI.getOperand(0);
1398       MachineOperand &Op1 = MI.getOperand(1);
1399       MachineOperand &Op2 = MI.getOperand(2);
1400       MachineOperand &Op3 = MI.getOperand(3);
1401       getLiveOutRegsAt(LiveOut, MI);
1402       bool IsDestLive = !LiveOut.available(MRI, Op0.getReg());
1403       Register PReg = Op1.getReg();
1404       assert(Op1.getSubReg() == 0);
1405       unsigned PState = getRegState(Op1);
1406 
1407       if (Op0.getReg() != Op2.getReg()) {
1408         unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1409                                                   : PState;
1410         Register SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo);
1411         Register SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi);
1412         auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
1413                      .add(Op0)
1414                      .addReg(PReg, S)
1415                      .addReg(SrcHi)
1416                      .addReg(SrcLo);
1417         if (IsDestLive)
1418           T.addReg(Op0.getReg(), RegState::Implicit);
1419         IsDestLive = true;
1420       }
1421       if (Op0.getReg() != Op3.getReg()) {
1422         Register SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo);
1423         Register SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi);
1424         auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
1425                      .add(Op0)
1426                      .addReg(PReg, PState)
1427                      .addReg(SrcHi)
1428                      .addReg(SrcLo);
1429         if (IsDestLive)
1430           T.addReg(Op0.getReg(), RegState::Implicit);
1431       }
1432       MBB.erase(MI);
1433       return true;
1434     }
1435 
1436     case Hexagon::PS_crash: {
1437       // Generate a misaligned load that is guaranteed to cause a crash.
1438       class CrashPseudoSourceValue : public PseudoSourceValue {
1439       public:
1440         CrashPseudoSourceValue(const TargetMachine &TM)
1441             : PseudoSourceValue(TargetCustom, TM) {}
1442 
1443         bool isConstant(const MachineFrameInfo *) const override {
1444           return false;
1445         }
1446         bool isAliased(const MachineFrameInfo *) const override {
1447           return false;
1448         }
1449         bool mayAlias(const MachineFrameInfo *) const override {
1450           return false;
1451         }
1452         void printCustom(raw_ostream &OS) const override {
1453           OS << "MisalignedCrash";
1454         }
1455       };
1456 
1457       static const CrashPseudoSourceValue CrashPSV(MF.getTarget());
1458       MachineMemOperand *MMO = MF.getMachineMemOperand(
1459           MachinePointerInfo(&CrashPSV),
1460           MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 8,
1461           Align(1));
1462       BuildMI(MBB, MI, DL, get(Hexagon::PS_loadrdabs), Hexagon::D13)
1463         .addImm(0xBADC0FEE)  // Misaligned load.
1464         .addMemOperand(MMO);
1465       MBB.erase(MI);
1466       return true;
1467     }
1468 
1469     case Hexagon::PS_tailcall_i:
1470       MI.setDesc(get(Hexagon::J2_jump));
1471       return true;
1472     case Hexagon::PS_tailcall_r:
1473     case Hexagon::PS_jmpret:
1474       MI.setDesc(get(Hexagon::J2_jumpr));
1475       return true;
1476     case Hexagon::PS_jmprett:
1477       MI.setDesc(get(Hexagon::J2_jumprt));
1478       return true;
1479     case Hexagon::PS_jmpretf:
1480       MI.setDesc(get(Hexagon::J2_jumprf));
1481       return true;
1482     case Hexagon::PS_jmprettnewpt:
1483       MI.setDesc(get(Hexagon::J2_jumprtnewpt));
1484       return true;
1485     case Hexagon::PS_jmpretfnewpt:
1486       MI.setDesc(get(Hexagon::J2_jumprfnewpt));
1487       return true;
1488     case Hexagon::PS_jmprettnew:
1489       MI.setDesc(get(Hexagon::J2_jumprtnew));
1490       return true;
1491     case Hexagon::PS_jmpretfnew:
1492       MI.setDesc(get(Hexagon::J2_jumprfnew));
1493       return true;
1494 
1495     case Hexagon::PS_loadrub_pci:
1496       return RealCirc(Hexagon::L2_loadrub_pci, /*HasImm*/true,  /*MxOp*/4);
1497     case Hexagon::PS_loadrb_pci:
1498       return RealCirc(Hexagon::L2_loadrb_pci,  /*HasImm*/true,  /*MxOp*/4);
1499     case Hexagon::PS_loadruh_pci:
1500       return RealCirc(Hexagon::L2_loadruh_pci, /*HasImm*/true,  /*MxOp*/4);
1501     case Hexagon::PS_loadrh_pci:
1502       return RealCirc(Hexagon::L2_loadrh_pci,  /*HasImm*/true,  /*MxOp*/4);
1503     case Hexagon::PS_loadri_pci:
1504       return RealCirc(Hexagon::L2_loadri_pci,  /*HasImm*/true,  /*MxOp*/4);
1505     case Hexagon::PS_loadrd_pci:
1506       return RealCirc(Hexagon::L2_loadrd_pci,  /*HasImm*/true,  /*MxOp*/4);
1507     case Hexagon::PS_loadrub_pcr:
1508       return RealCirc(Hexagon::L2_loadrub_pcr, /*HasImm*/false, /*MxOp*/3);
1509     case Hexagon::PS_loadrb_pcr:
1510       return RealCirc(Hexagon::L2_loadrb_pcr,  /*HasImm*/false, /*MxOp*/3);
1511     case Hexagon::PS_loadruh_pcr:
1512       return RealCirc(Hexagon::L2_loadruh_pcr, /*HasImm*/false, /*MxOp*/3);
1513     case Hexagon::PS_loadrh_pcr:
1514       return RealCirc(Hexagon::L2_loadrh_pcr,  /*HasImm*/false, /*MxOp*/3);
1515     case Hexagon::PS_loadri_pcr:
1516       return RealCirc(Hexagon::L2_loadri_pcr,  /*HasImm*/false, /*MxOp*/3);
1517     case Hexagon::PS_loadrd_pcr:
1518       return RealCirc(Hexagon::L2_loadrd_pcr,  /*HasImm*/false, /*MxOp*/3);
1519     case Hexagon::PS_storerb_pci:
1520       return RealCirc(Hexagon::S2_storerb_pci, /*HasImm*/true,  /*MxOp*/3);
1521     case Hexagon::PS_storerh_pci:
1522       return RealCirc(Hexagon::S2_storerh_pci, /*HasImm*/true,  /*MxOp*/3);
1523     case Hexagon::PS_storerf_pci:
1524       return RealCirc(Hexagon::S2_storerf_pci, /*HasImm*/true,  /*MxOp*/3);
1525     case Hexagon::PS_storeri_pci:
1526       return RealCirc(Hexagon::S2_storeri_pci, /*HasImm*/true,  /*MxOp*/3);
1527     case Hexagon::PS_storerd_pci:
1528       return RealCirc(Hexagon::S2_storerd_pci, /*HasImm*/true,  /*MxOp*/3);
1529     case Hexagon::PS_storerb_pcr:
1530       return RealCirc(Hexagon::S2_storerb_pcr, /*HasImm*/false, /*MxOp*/2);
1531     case Hexagon::PS_storerh_pcr:
1532       return RealCirc(Hexagon::S2_storerh_pcr, /*HasImm*/false, /*MxOp*/2);
1533     case Hexagon::PS_storerf_pcr:
1534       return RealCirc(Hexagon::S2_storerf_pcr, /*HasImm*/false, /*MxOp*/2);
1535     case Hexagon::PS_storeri_pcr:
1536       return RealCirc(Hexagon::S2_storeri_pcr, /*HasImm*/false, /*MxOp*/2);
1537     case Hexagon::PS_storerd_pcr:
1538       return RealCirc(Hexagon::S2_storerd_pcr, /*HasImm*/false, /*MxOp*/2);
1539   }
1540 
1541   return false;
1542 }
1543 
1544 MachineBasicBlock::instr_iterator
1545 HexagonInstrInfo::expandVGatherPseudo(MachineInstr &MI) const {
1546   MachineBasicBlock &MBB = *MI.getParent();
1547   const DebugLoc &DL = MI.getDebugLoc();
1548   unsigned Opc = MI.getOpcode();
1549   MachineBasicBlock::iterator First;
1550 
1551   switch (Opc) {
1552     case Hexagon::V6_vgathermh_pseudo:
1553       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermh))
1554                   .add(MI.getOperand(2))
1555                   .add(MI.getOperand(3))
1556                   .add(MI.getOperand(4));
1557       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1558           .add(MI.getOperand(0))
1559           .addImm(MI.getOperand(1).getImm())
1560           .addReg(Hexagon::VTMP);
1561       MBB.erase(MI);
1562       return First.getInstrIterator();
1563 
1564     case Hexagon::V6_vgathermw_pseudo:
1565       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermw))
1566                   .add(MI.getOperand(2))
1567                   .add(MI.getOperand(3))
1568                   .add(MI.getOperand(4));
1569       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1570           .add(MI.getOperand(0))
1571           .addImm(MI.getOperand(1).getImm())
1572           .addReg(Hexagon::VTMP);
1573       MBB.erase(MI);
1574       return First.getInstrIterator();
1575 
1576     case Hexagon::V6_vgathermhw_pseudo:
1577       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhw))
1578                   .add(MI.getOperand(2))
1579                   .add(MI.getOperand(3))
1580                   .add(MI.getOperand(4));
1581       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1582           .add(MI.getOperand(0))
1583           .addImm(MI.getOperand(1).getImm())
1584           .addReg(Hexagon::VTMP);
1585       MBB.erase(MI);
1586       return First.getInstrIterator();
1587 
1588     case Hexagon::V6_vgathermhq_pseudo:
1589       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhq))
1590                   .add(MI.getOperand(2))
1591                   .add(MI.getOperand(3))
1592                   .add(MI.getOperand(4))
1593                   .add(MI.getOperand(5));
1594       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1595           .add(MI.getOperand(0))
1596           .addImm(MI.getOperand(1).getImm())
1597           .addReg(Hexagon::VTMP);
1598       MBB.erase(MI);
1599       return First.getInstrIterator();
1600 
1601     case Hexagon::V6_vgathermwq_pseudo:
1602       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermwq))
1603                   .add(MI.getOperand(2))
1604                   .add(MI.getOperand(3))
1605                   .add(MI.getOperand(4))
1606                   .add(MI.getOperand(5));
1607       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1608           .add(MI.getOperand(0))
1609           .addImm(MI.getOperand(1).getImm())
1610           .addReg(Hexagon::VTMP);
1611       MBB.erase(MI);
1612       return First.getInstrIterator();
1613 
1614     case Hexagon::V6_vgathermhwq_pseudo:
1615       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhwq))
1616                   .add(MI.getOperand(2))
1617                   .add(MI.getOperand(3))
1618                   .add(MI.getOperand(4))
1619                   .add(MI.getOperand(5));
1620       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1621           .add(MI.getOperand(0))
1622           .addImm(MI.getOperand(1).getImm())
1623           .addReg(Hexagon::VTMP);
1624       MBB.erase(MI);
1625       return First.getInstrIterator();
1626   }
1627 
1628   return MI.getIterator();
1629 }
1630 
// We indicate that the branch has been reversed by replacing the branching
// opcode in Cond[0] with its inverted form.
1633 bool HexagonInstrInfo::reverseBranchCondition(
1634       SmallVectorImpl<MachineOperand> &Cond) const {
1635   if (Cond.empty())
1636     return true;
1637   assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1638   unsigned opcode = Cond[0].getImm();
1640   assert(get(opcode).isBranch() && "Should be a branching condition.");
1641   if (isEndLoopN(opcode))
1642     return true;
1643   unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1644   Cond[0].setImm(NewOpcode);
1645   return false;
1646 }
1647 
1648 void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
1649       MachineBasicBlock::iterator MI) const {
1650   DebugLoc DL;
1651   BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1652 }
1653 
1654 bool HexagonInstrInfo::isPostIncrement(const MachineInstr &MI) const {
1655   return getAddrMode(MI) == HexagonII::PostInc;
1656 }
1657 
1658 // Returns true if an instruction is predicated irrespective of the predicate
1659 // sense. For example, all of the following will return true.
1660 // if (p0) R1 = add(R2, R3)
1661 // if (!p0) R1 = add(R2, R3)
1662 // if (p0.new) R1 = add(R2, R3)
1663 // if (!p0.new) R1 = add(R2, R3)
1664 // Note: New-value stores are not included here as in the current
1665 // implementation, we don't need to check their predicate sense.
1666 bool HexagonInstrInfo::isPredicated(const MachineInstr &MI) const {
1667   const uint64_t F = MI.getDesc().TSFlags;
1668   return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
1669 }
1670 
1671 bool HexagonInstrInfo::PredicateInstruction(
1672     MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
1673   if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1674       isEndLoopN(Cond[0].getImm())) {
1675     LLVM_DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
1676     return false;
1677   }
1678   int Opc = MI.getOpcode();
  assert(isPredicable(MI) && "Expected predicable instruction");
1680   bool invertJump = predOpcodeHasNot(Cond);
1681 
1682   // We have to predicate MI "in place", i.e. after this function returns,
  // MI will need to be transformed into a predicated form. To avoid
  // complicated manipulations with the operands (handling tied operands,
  // etc.), build a new temporary instruction, then overwrite MI with it.
1686 
1687   MachineBasicBlock &B = *MI.getParent();
1688   DebugLoc DL = MI.getDebugLoc();
1689   unsigned PredOpc = getCondOpcode(Opc, invertJump);
1690   MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1691   unsigned NOp = 0, NumOps = MI.getNumOperands();
1692   while (NOp < NumOps) {
1693     MachineOperand &Op = MI.getOperand(NOp);
1694     if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1695       break;
1696     T.add(Op);
1697     NOp++;
1698   }
1699 
1700   Register PredReg;
1701   unsigned PredRegPos, PredRegFlags;
1702   bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1703   (void)GotPredReg;
1704   assert(GotPredReg);
1705   T.addReg(PredReg, PredRegFlags);
1706   while (NOp < NumOps)
1707     T.add(MI.getOperand(NOp++));
1708 
1709   MI.setDesc(get(PredOpc));
1710   while (unsigned n = MI.getNumOperands())
1711     MI.removeOperand(n-1);
1712   for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1713     MI.addOperand(T->getOperand(i));
1714 
1715   MachineBasicBlock::instr_iterator TI = T->getIterator();
1716   B.erase(TI);
1717 
1718   MachineRegisterInfo &MRI = B.getParent()->getRegInfo();
1719   MRI.clearKillFlags(PredReg);
1720   return true;
1721 }
1722 
1723 bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1724       ArrayRef<MachineOperand> Pred2) const {
1725   // TODO: Fix this
1726   return false;
1727 }
1728 
1729 bool HexagonInstrInfo::ClobbersPredicate(MachineInstr &MI,
1730                                          std::vector<MachineOperand> &Pred,
1731                                          bool SkipDead) const {
1732   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1733 
1734   for (const MachineOperand &MO : MI.operands()) {
1735     if (MO.isReg()) {
1736       if (!MO.isDef())
1737         continue;
1738       const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1739       if (RC == &Hexagon::PredRegsRegClass) {
1740         Pred.push_back(MO);
1741         return true;
1742       }
1743       continue;
1744     } else if (MO.isRegMask()) {
1745       for (Register PR : Hexagon::PredRegsRegClass) {
1746         if (!MI.modifiesRegister(PR, &HRI))
1747           continue;
1748         Pred.push_back(MO);
1749         return true;
1750       }
1751     }
1752   }
1753   return false;
1754 }
1755 
1756 bool HexagonInstrInfo::isPredicable(const MachineInstr &MI) const {
1757   if (!MI.getDesc().isPredicable())
1758     return false;
1759 
1760   if (MI.isCall() || isTailCall(MI)) {
1761     if (!Subtarget.usePredicatedCalls())
1762       return false;
1763   }
1764 
1765   // HVX loads are not predicable on v60, but are on v62.
1766   if (!Subtarget.hasV62Ops()) {
1767     switch (MI.getOpcode()) {
1768       case Hexagon::V6_vL32b_ai:
1769       case Hexagon::V6_vL32b_pi:
1770       case Hexagon::V6_vL32b_ppu:
1771       case Hexagon::V6_vL32b_cur_ai:
1772       case Hexagon::V6_vL32b_cur_pi:
1773       case Hexagon::V6_vL32b_cur_ppu:
1774       case Hexagon::V6_vL32b_nt_ai:
1775       case Hexagon::V6_vL32b_nt_pi:
1776       case Hexagon::V6_vL32b_nt_ppu:
1777       case Hexagon::V6_vL32b_tmp_ai:
1778       case Hexagon::V6_vL32b_tmp_pi:
1779       case Hexagon::V6_vL32b_tmp_ppu:
1780       case Hexagon::V6_vL32b_nt_cur_ai:
1781       case Hexagon::V6_vL32b_nt_cur_pi:
1782       case Hexagon::V6_vL32b_nt_cur_ppu:
1783       case Hexagon::V6_vL32b_nt_tmp_ai:
1784       case Hexagon::V6_vL32b_nt_tmp_pi:
1785       case Hexagon::V6_vL32b_nt_tmp_ppu:
1786         return false;
1787     }
1788   }
1789   return true;
1790 }
1791 
1792 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1793                                             const MachineBasicBlock *MBB,
1794                                             const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of calls and terminators below; otherwise a
  // dbg_value followed by one of them would make that instruction appear to
  // be a scheduling hazard, which is wrong. The boundary should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
1801   if (MI.isDebugInstr())
1802     return false;
1803 
  // A throwing call is a boundary.
  if (MI.isCall()) {
    // Don't mess around with no-return calls.
    if (doesNotReturn(MI))
1808       return true;
1809     // If any of the block's successors is a landing pad, this could be a
1810     // throwing call.
1811     for (auto *I : MBB->successors())
1812       if (I->isEHPad())
1813         return true;
1814   }
1815 
1816   // Terminators and labels can't be scheduled around.
1817   if (MI.getDesc().isTerminator() || MI.isPosition())
1818     return true;
1819 
1820   // INLINEASM_BR can jump to another block
1821   if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1822     return true;
1823 
1824   if (MI.isInlineAsm() && !ScheduleInlineAsm)
1825     return true;
1826 
1827   return false;
1828 }
1829 
1830 /// Measure the specified inline asm to determine an approximation of its
1831 /// length.
1832 /// Comments (which run till the next SeparatorString or newline) do not
1833 /// count as an instruction.
1834 /// Any other non-whitespace text is considered an instruction, with
1835 /// multiple instructions separated by SeparatorString or newlines.
1836 /// Variable-length instructions are not handled here; this function
1837 /// may be overloaded in the target code to do that.
1838 /// Hexagon counts the number of ##'s and adjust for that many
1839 /// constant exenders.
1840 unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1841                                               const MCAsmInfo &MAI,
1842                                               const TargetSubtargetInfo *STI) const {
1843   StringRef AStr(Str);
1844   // Count the number of instructions in the asm.
1845   bool atInsnStart = true;
1846   unsigned Length = 0;
1847   const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
1848   for (; *Str; ++Str) {
1849     if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1850                                 strlen(MAI.getSeparatorString())) == 0)
1851       atInsnStart = true;
1852     if (atInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
1853       Length += MaxInstLength;
1854       atInsnStart = false;
1855     }
1856     if (atInsnStart && strncmp(Str, MAI.getCommentString().data(),
1857                                MAI.getCommentString().size()) == 0)
1858       atInsnStart = false;
1859   }
1860 
  // Add 4 bytes to the length for each constant extender ("##") seen.
1862   StringRef Occ("##");
1863   Length += AStr.count(Occ)*4;
1864   return Length;
1865 }
1866 
1867 ScheduleHazardRecognizer*
1868 HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
1869       const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1870   if (UseDFAHazardRec)
1871     return new HexagonHazardRecognizer(II, this, Subtarget);
1872   return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
1873 }
1874 
/// For a comparison instruction, return the source registers in
/// \p SrcReg and \p SrcReg2 if it has two register operands, and the value it
/// compares against in \p Value. Return true if the comparison instruction
/// can be analyzed.
1879 bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
1880                                       Register &SrcReg2, int64_t &Mask,
1881                                       int64_t &Value) const {
1882   unsigned Opc = MI.getOpcode();
1883 
1884   // Set mask and the first source register.
1885   switch (Opc) {
1886     case Hexagon::C2_cmpeq:
1887     case Hexagon::C2_cmpeqp:
1888     case Hexagon::C2_cmpgt:
1889     case Hexagon::C2_cmpgtp:
1890     case Hexagon::C2_cmpgtu:
1891     case Hexagon::C2_cmpgtup:
1892     case Hexagon::C4_cmpneq:
1893     case Hexagon::C4_cmplte:
1894     case Hexagon::C4_cmplteu:
1895     case Hexagon::C2_cmpeqi:
1896     case Hexagon::C2_cmpgti:
1897     case Hexagon::C2_cmpgtui:
1898     case Hexagon::C4_cmpneqi:
1899     case Hexagon::C4_cmplteui:
1900     case Hexagon::C4_cmpltei:
1901       SrcReg = MI.getOperand(1).getReg();
1902       Mask = ~0;
1903       break;
1904     case Hexagon::A4_cmpbeq:
1905     case Hexagon::A4_cmpbgt:
1906     case Hexagon::A4_cmpbgtu:
1907     case Hexagon::A4_cmpbeqi:
1908     case Hexagon::A4_cmpbgti:
1909     case Hexagon::A4_cmpbgtui:
1910       SrcReg = MI.getOperand(1).getReg();
1911       Mask = 0xFF;
1912       break;
1913     case Hexagon::A4_cmpheq:
1914     case Hexagon::A4_cmphgt:
1915     case Hexagon::A4_cmphgtu:
1916     case Hexagon::A4_cmpheqi:
1917     case Hexagon::A4_cmphgti:
1918     case Hexagon::A4_cmphgtui:
1919       SrcReg = MI.getOperand(1).getReg();
1920       Mask = 0xFFFF;
1921       break;
1922   }
1923 
1924   // Set the value/second source register.
1925   switch (Opc) {
1926     case Hexagon::C2_cmpeq:
1927     case Hexagon::C2_cmpeqp:
1928     case Hexagon::C2_cmpgt:
1929     case Hexagon::C2_cmpgtp:
1930     case Hexagon::C2_cmpgtu:
1931     case Hexagon::C2_cmpgtup:
1932     case Hexagon::A4_cmpbeq:
1933     case Hexagon::A4_cmpbgt:
1934     case Hexagon::A4_cmpbgtu:
1935     case Hexagon::A4_cmpheq:
1936     case Hexagon::A4_cmphgt:
1937     case Hexagon::A4_cmphgtu:
1938     case Hexagon::C4_cmpneq:
1939     case Hexagon::C4_cmplte:
1940     case Hexagon::C4_cmplteu:
1941       SrcReg2 = MI.getOperand(2).getReg();
1942       Value = 0;
1943       return true;
1944 
1945     case Hexagon::C2_cmpeqi:
1946     case Hexagon::C2_cmpgtui:
1947     case Hexagon::C2_cmpgti:
1948     case Hexagon::C4_cmpneqi:
1949     case Hexagon::C4_cmplteui:
1950     case Hexagon::C4_cmpltei:
1951     case Hexagon::A4_cmpbeqi:
1952     case Hexagon::A4_cmpbgti:
1953     case Hexagon::A4_cmpbgtui:
1954     case Hexagon::A4_cmpheqi:
1955     case Hexagon::A4_cmphgti:
1956     case Hexagon::A4_cmphgtui: {
1957       SrcReg2 = 0;
1958       const MachineOperand &Op2 = MI.getOperand(2);
1959       if (!Op2.isImm())
1960         return false;
1961       Value = MI.getOperand(2).getImm();
1962       return true;
1963     }
1964   }
1965 
1966   return false;
1967 }
1968 
1969 unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1970                                            const MachineInstr &MI,
1971                                            unsigned *PredCost) const {
1972   return getInstrTimingClassLatency(ItinData, MI);
1973 }
1974 
1975 DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
1976     const TargetSubtargetInfo &STI) const {
1977   const InstrItineraryData *II = STI.getInstrItineraryData();
1978   return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1979 }
1980 
1981 // Inspired by this pair:
1982 //  %r13 = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
1983 //  S2_storeri_io %r29, 132, killed %r1; flags:  mem:ST4[FixedStack1]
1984 // Currently AA considers the addresses in these instructions to be aliasing.
1985 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
1986     const MachineInstr &MIa, const MachineInstr &MIb) const {
1987   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1988       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1989     return false;
1990 
  // Instructions that are pure loads (as opposed to memops, which both load
  // and store) cannot be dependent on each other.
1993   if (MIa.mayLoad() && !isMemOp(MIa) && MIb.mayLoad() && !isMemOp(MIb))
1994     return true;
1995 
1996   // Get the base register in MIa.
1997   unsigned BasePosA, OffsetPosA;
1998   if (!getBaseAndOffsetPosition(MIa, BasePosA, OffsetPosA))
1999     return false;
2000   const MachineOperand &BaseA = MIa.getOperand(BasePosA);
2001   Register BaseRegA = BaseA.getReg();
2002   unsigned BaseSubA = BaseA.getSubReg();
2003 
2004   // Get the base register in MIb.
2005   unsigned BasePosB, OffsetPosB;
2006   if (!getBaseAndOffsetPosition(MIb, BasePosB, OffsetPosB))
2007     return false;
2008   const MachineOperand &BaseB = MIb.getOperand(BasePosB);
2009   Register BaseRegB = BaseB.getReg();
2010   unsigned BaseSubB = BaseB.getSubReg();
2011 
2012   if (BaseRegA != BaseRegB || BaseSubA != BaseSubB)
2013     return false;
2014 
2015   // Get the access sizes.
2016   unsigned SizeA = getMemAccessSize(MIa);
2017   unsigned SizeB = getMemAccessSize(MIb);
2018 
2019   // Get the offsets. Handle immediates only for now.
2020   const MachineOperand &OffA = MIa.getOperand(OffsetPosA);
2021   const MachineOperand &OffB = MIb.getOperand(OffsetPosB);
2022   if (!MIa.getOperand(OffsetPosA).isImm() ||
2023       !MIb.getOperand(OffsetPosB).isImm())
2024     return false;
2025   int OffsetA = isPostIncrement(MIa) ? 0 : OffA.getImm();
2026   int OffsetB = isPostIncrement(MIb) ? 0 : OffB.getImm();
2027 
2028   // This is a mem access with the same base register and known offsets from it.
2029   // Reason about it.
2030   if (OffsetA > OffsetB) {
2031     uint64_t OffDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
2032     return SizeB <= OffDiff;
2033   }
2034   if (OffsetA < OffsetB) {
2035     uint64_t OffDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
2036     return SizeA <= OffDiff;
2037   }
2038 
2039   return false;
2040 }
2041 
2042 /// If the instruction is an increment of a constant value, return the amount.
2043 bool HexagonInstrInfo::getIncrementValue(const MachineInstr &MI,
2044       int &Value) const {
2045   if (isPostIncrement(MI)) {
2046     unsigned BasePos = 0, OffsetPos = 0;
2047     if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
2048       return false;
2049     const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
2050     if (OffsetOp.isImm()) {
2051       Value = OffsetOp.getImm();
2052       return true;
2053     }
2054   } else if (MI.getOpcode() == Hexagon::A2_addi) {
2055     const MachineOperand &AddOp = MI.getOperand(2);
2056     if (AddOp.isImm()) {
2057       Value = AddOp.getImm();
2058       return true;
2059     }
2060   }
2061 
2062   return false;
2063 }
2064 
2065 std::pair<unsigned, unsigned>
2066 HexagonInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
2067   return std::make_pair(TF & ~HexagonII::MO_Bitmasks,
2068                         TF & HexagonII::MO_Bitmasks);
2069 }
2070 
2071 ArrayRef<std::pair<unsigned, const char*>>
2072 HexagonInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
2073   using namespace HexagonII;
2074 
2075   static const std::pair<unsigned, const char*> Flags[] = {
2076     {MO_PCREL,  "hexagon-pcrel"},
2077     {MO_GOT,    "hexagon-got"},
2078     {MO_LO16,   "hexagon-lo16"},
2079     {MO_HI16,   "hexagon-hi16"},
2080     {MO_GPREL,  "hexagon-gprel"},
2081     {MO_GDGOT,  "hexagon-gdgot"},
2082     {MO_GDPLT,  "hexagon-gdplt"},
2083     {MO_IE,     "hexagon-ie"},
2084     {MO_IEGOT,  "hexagon-iegot"},
2085     {MO_TPREL,  "hexagon-tprel"}
2086   };
2087   return ArrayRef(Flags);
2088 }
2089 
2090 ArrayRef<std::pair<unsigned, const char*>>
2091 HexagonInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
2092   using namespace HexagonII;
2093 
2094   static const std::pair<unsigned, const char*> Flags[] = {
2095     {HMOTF_ConstExtended, "hexagon-ext"}
2096   };
2097   return ArrayRef(Flags);
2098 }
2099 
2100 Register HexagonInstrInfo::createVR(MachineFunction *MF, MVT VT) const {
2101   MachineRegisterInfo &MRI = MF->getRegInfo();
2102   const TargetRegisterClass *TRC;
2103   if (VT == MVT::i1) {
2104     TRC = &Hexagon::PredRegsRegClass;
2105   } else if (VT == MVT::i32 || VT == MVT::f32) {
2106     TRC = &Hexagon::IntRegsRegClass;
2107   } else if (VT == MVT::i64 || VT == MVT::f64) {
2108     TRC = &Hexagon::DoubleRegsRegClass;
2109   } else {
2110     llvm_unreachable("Cannot handle this register class");
2111   }
2112 
2113   Register NewReg = MRI.createVirtualRegister(TRC);
2114   return NewReg;
2115 }
2116 
2117 bool HexagonInstrInfo::isAbsoluteSet(const MachineInstr &MI) const {
2118   return (getAddrMode(MI) == HexagonII::AbsoluteSet);
2119 }
2120 
2121 bool HexagonInstrInfo::isAccumulator(const MachineInstr &MI) const {
2122   const uint64_t F = MI.getDesc().TSFlags;
2123   return((F >> HexagonII::AccumulatorPos) & HexagonII::AccumulatorMask);
2124 }
2125 
2126 bool HexagonInstrInfo::isBaseImmOffset(const MachineInstr &MI) const {
2127   return getAddrMode(MI) == HexagonII::BaseImmOffset;
2128 }
2129 
2130 bool HexagonInstrInfo::isComplex(const MachineInstr &MI) const {
2131   return !isTC1(MI) && !isTC2Early(MI) && !MI.getDesc().mayLoad() &&
2132          !MI.getDesc().mayStore() &&
2133          MI.getDesc().getOpcode() != Hexagon::S2_allocframe &&
2134          MI.getDesc().getOpcode() != Hexagon::L2_deallocframe &&
2135          !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
2136 }
2137 
2138 // Return true if the instruction is a compound branch instruction.
2139 bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr &MI) const {
2140   return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
2141 }
2142 
2143 // TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
2144 // isFPImm and later getFPImm as well.
2145 bool HexagonInstrInfo::isConstExtended(const MachineInstr &MI) const {
2146   const uint64_t F = MI.getDesc().TSFlags;
2147   unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
2148   if (isExtended) // Instruction must be extended.
2149     return true;
2150 
2151   unsigned isExtendable =
2152     (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask;
2153   if (!isExtendable)
2154     return false;
2155 
2156   if (MI.isCall())
2157     return false;
2158 
2159   short ExtOpNum = getCExtOpNum(MI);
2160   const MachineOperand &MO = MI.getOperand(ExtOpNum);
2161   // Use MO operand flags to determine if MO
2162   // has the HMOTF_ConstExtended flag set.
2163   if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2164     return true;
  // If the operand is a machine basic block address and it is not marked
  // as extended, it is not constant-extended.
2167   if (MO.isMBB())
2168     return false;
2169 
2170   // We could be using an instruction with an extendable immediate and shoehorn
2171   // a global address into it. If it is a global address it will be constant
2172   // extended. We do this for COMBINE.
2173   if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
2174       MO.isJTI() || MO.isCPI() || MO.isFPImm())
2175     return true;
2176 
2177   // If the extendable operand is not 'Immediate' type, the instruction should
2178   // have 'isExtended' flag set.
2179   assert(MO.isImm() && "Extendable operand must be Immediate type");
2180 
2181   int64_t Value = MO.getImm();
2182   if ((F >> HexagonII::ExtentSignedPos) & HexagonII::ExtentSignedMask) {
2183     int32_t SValue = Value;
2184     int32_t MinValue = getMinValue(MI);
2185     int32_t MaxValue = getMaxValue(MI);
2186     return SValue < MinValue || SValue > MaxValue;
2187   }
2188   uint32_t UValue = Value;
2189   uint32_t MinValue = getMinValue(MI);
2190   uint32_t MaxValue = getMaxValue(MI);
2191   return UValue < MinValue || UValue > MaxValue;
2192 }
2193 
2194 bool HexagonInstrInfo::isDeallocRet(const MachineInstr &MI) const {
2195   switch (MI.getOpcode()) {
2196   case Hexagon::L4_return:
2197   case Hexagon::L4_return_t:
2198   case Hexagon::L4_return_f:
2199   case Hexagon::L4_return_tnew_pnt:
2200   case Hexagon::L4_return_fnew_pnt:
2201   case Hexagon::L4_return_tnew_pt:
2202   case Hexagon::L4_return_fnew_pt:
2203     return true;
2204   }
2205   return false;
2206 }
2207 
2208 // Return true when ConsMI uses a register defined by ProdMI.
2209 bool HexagonInstrInfo::isDependent(const MachineInstr &ProdMI,
2210       const MachineInstr &ConsMI) const {
2211   if (!ProdMI.getDesc().getNumDefs())
2212     return false;
2213   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
2214 
2215   SmallVector<Register, 4> DefsA;
2216   SmallVector<Register, 4> DefsB;
2217   SmallVector<Register, 8> UsesA;
2218   SmallVector<Register, 8> UsesB;
2219 
2220   parseOperands(ProdMI, DefsA, UsesA);
2221   parseOperands(ConsMI, DefsB, UsesB);
2222 
2223   for (auto &RegA : DefsA)
2224     for (auto &RegB : UsesB) {
2225       // True data dependency.
2226       if (RegA == RegB)
2227         return true;
2228 
2229       if (RegA.isPhysical() && llvm::is_contained(HRI.subregs(RegA), RegB))
2230         return true;
2231 
2232       if (RegB.isPhysical() && llvm::is_contained(HRI.subregs(RegB), RegA))
2233         return true;
2234     }
2235 
2236   return false;
2237 }
2238 
// Returns true if the instruction is already a .cur.
2240 bool HexagonInstrInfo::isDotCurInst(const MachineInstr &MI) const {
2241   switch (MI.getOpcode()) {
2242   case Hexagon::V6_vL32b_cur_pi:
2243   case Hexagon::V6_vL32b_cur_ai:
2244     return true;
2245   }
2246   return false;
2247 }
2248 
// Returns true if the instruction is a dot-new insn, whether it is a
// predicated dot-new or a register (new-value) dot-new.
2251 bool HexagonInstrInfo::isDotNewInst(const MachineInstr &MI) const {
2252   if (isNewValueInst(MI) || (isPredicated(MI) && isPredicatedNew(MI)))
2253     return true;
2254 
2255   return false;
2256 }
2257 
/// Symmetrical. See if these two instructions are fit for a duplex pair.
2259 bool HexagonInstrInfo::isDuplexPair(const MachineInstr &MIa,
2260       const MachineInstr &MIb) const {
2261   HexagonII::SubInstructionGroup MIaG = getDuplexCandidateGroup(MIa);
2262   HexagonII::SubInstructionGroup MIbG = getDuplexCandidateGroup(MIb);
2263   return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
2264 }
2265 
2266 bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
2267   return (Opcode == Hexagon::ENDLOOP0 ||
2268           Opcode == Hexagon::ENDLOOP1);
2269 }
2270 
2271 bool HexagonInstrInfo::isExpr(unsigned OpType) const {
2272   switch(OpType) {
2273   case MachineOperand::MO_MachineBasicBlock:
2274   case MachineOperand::MO_GlobalAddress:
2275   case MachineOperand::MO_ExternalSymbol:
2276   case MachineOperand::MO_JumpTableIndex:
2277   case MachineOperand::MO_ConstantPoolIndex:
2278   case MachineOperand::MO_BlockAddress:
2279     return true;
2280   default:
2281     return false;
2282   }
2283 }
2284 
2285 bool HexagonInstrInfo::isExtendable(const MachineInstr &MI) const {
2286   const MCInstrDesc &MID = MI.getDesc();
2287   const uint64_t F = MID.TSFlags;
2288   if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
2289     return true;
2290 
  // TODO: This is largely obsolete now. Will need to be removed
  // in subsequent patches.
2293   switch (MI.getOpcode()) {
2294     // PS_fi and PS_fia remain special cases.
2295     case Hexagon::PS_fi:
2296     case Hexagon::PS_fia:
2297       return true;
2298     default:
2299       return false;
2300   }
2301   return  false;
2302 }
2303 
2304 // This returns true in two cases:
// - The opcode itself indicates that this is an extended instruction.
// - One of the operands has been marked with the HMOTF_ConstExtended flag.
2307 bool HexagonInstrInfo::isExtended(const MachineInstr &MI) const {
  // First check if this is a permanently-extended opcode.
2309   const uint64_t F = MI.getDesc().TSFlags;
2310   if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
2311     return true;
2312   // Use MO operand flags to determine if one of MI's operands
2313   // has HMOTF_ConstExtended flag set.
2314   for (const MachineOperand &MO : MI.operands())
2315     if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2316       return true;
  return false;
2318 }
2319 
2320 bool HexagonInstrInfo::isFloat(const MachineInstr &MI) const {
2321   unsigned Opcode = MI.getOpcode();
2322   const uint64_t F = get(Opcode).TSFlags;
2323   return (F >> HexagonII::FPPos) & HexagonII::FPMask;
2324 }
2325 
2326 // No V60 HVX VMEM with A_INDIRECT.
2327 bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr &I,
2328       const MachineInstr &J) const {
2329   if (!isHVXVec(I))
2330     return false;
2331   if (!I.mayLoad() && !I.mayStore())
2332     return false;
2333   return J.isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
2334 }
2335 
2336 bool HexagonInstrInfo::isIndirectCall(const MachineInstr &MI) const {
2337   switch (MI.getOpcode()) {
2338   case Hexagon::J2_callr:
2339   case Hexagon::J2_callrf:
2340   case Hexagon::J2_callrt:
2341   case Hexagon::PS_call_nr:
2342     return true;
2343   }
2344   return false;
2345 }
2346 
2347 bool HexagonInstrInfo::isIndirectL4Return(const MachineInstr &MI) const {
2348   switch (MI.getOpcode()) {
2349   case Hexagon::L4_return:
2350   case Hexagon::L4_return_t:
2351   case Hexagon::L4_return_f:
2352   case Hexagon::L4_return_fnew_pnt:
2353   case Hexagon::L4_return_fnew_pt:
2354   case Hexagon::L4_return_tnew_pnt:
2355   case Hexagon::L4_return_tnew_pt:
2356     return true;
2357   }
2358   return false;
2359 }
2360 
2361 bool HexagonInstrInfo::isJumpR(const MachineInstr &MI) const {
2362   switch (MI.getOpcode()) {
2363   case Hexagon::J2_jumpr:
2364   case Hexagon::J2_jumprt:
2365   case Hexagon::J2_jumprf:
2366   case Hexagon::J2_jumprtnewpt:
2367   case Hexagon::J2_jumprfnewpt:
2368   case Hexagon::J2_jumprtnew:
2369   case Hexagon::J2_jumprfnew:
2370     return true;
2371   }
2372   return false;
2373 }
2374 
// Return true if the given MI can accommodate the given offset.
// Uses a conservative estimate rather than the exact encoding limit.
2377 // TODO: This will need to be changed to use MC level
2378 // definition of instruction extendable field size.
2379 bool HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr &MI,
2380       unsigned offset) const {
  // This selection of jump instructions matches what analyzeBranch can
  // parse, plus new-value jumps (NVJ).
2383   if (isNewValueJump(MI)) // r9:2
2384     return isInt<11>(offset);
2385 
2386   switch (MI.getOpcode()) {
  // Jumps conditional on a register value are still missing here.
2388   default:
2389     return false;
2390   case Hexagon::J2_jump: // bits<24> dst; // r22:2
2391   case Hexagon::J2_call:
2392   case Hexagon::PS_call_nr:
2393     return isInt<24>(offset);
2394   case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
2395   case Hexagon::J2_jumpf:
2396   case Hexagon::J2_jumptnew:
2397   case Hexagon::J2_jumptnewpt:
2398   case Hexagon::J2_jumpfnew:
2399   case Hexagon::J2_jumpfnewpt:
2400   case Hexagon::J2_callt:
2401   case Hexagon::J2_callf:
2402     return isInt<17>(offset);
2403   case Hexagon::J2_loop0i:
2404   case Hexagon::J2_loop0iext:
2405   case Hexagon::J2_loop0r:
2406   case Hexagon::J2_loop0rext:
2407   case Hexagon::J2_loop1i:
2408   case Hexagon::J2_loop1iext:
2409   case Hexagon::J2_loop1r:
2410   case Hexagon::J2_loop1rext:
2411     return isInt<9>(offset);
2412   // TODO: Add all the compound branches here. Can we do this in Relation model?
2413   case Hexagon::J4_cmpeqi_tp0_jump_nt:
2414   case Hexagon::J4_cmpeqi_tp1_jump_nt:
2415   case Hexagon::J4_cmpeqn1_tp0_jump_nt:
2416   case Hexagon::J4_cmpeqn1_tp1_jump_nt:
2417     return isInt<11>(offset);
2418   }
2419 }
2420 
2421 bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr &MI) const {
  // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE use a multiply
  // resource, but all operands can be received late, like an ALU instruction.
2424   return getType(MI) == HexagonII::TypeCVI_VX_LATE;
2425 }
2426 
2427 bool HexagonInstrInfo::isLoopN(const MachineInstr &MI) const {
2428   unsigned Opcode = MI.getOpcode();
2429   return Opcode == Hexagon::J2_loop0i    ||
2430          Opcode == Hexagon::J2_loop0r    ||
2431          Opcode == Hexagon::J2_loop0iext ||
2432          Opcode == Hexagon::J2_loop0rext ||
2433          Opcode == Hexagon::J2_loop1i    ||
2434          Opcode == Hexagon::J2_loop1r    ||
2435          Opcode == Hexagon::J2_loop1iext ||
2436          Opcode == Hexagon::J2_loop1rext;
2437 }
2438 
2439 bool HexagonInstrInfo::isMemOp(const MachineInstr &MI) const {
2440   switch (MI.getOpcode()) {
2441     default: return false;
2442     case Hexagon::L4_iadd_memopw_io:
2443     case Hexagon::L4_isub_memopw_io:
2444     case Hexagon::L4_add_memopw_io:
2445     case Hexagon::L4_sub_memopw_io:
2446     case Hexagon::L4_and_memopw_io:
2447     case Hexagon::L4_or_memopw_io:
2448     case Hexagon::L4_iadd_memoph_io:
2449     case Hexagon::L4_isub_memoph_io:
2450     case Hexagon::L4_add_memoph_io:
2451     case Hexagon::L4_sub_memoph_io:
2452     case Hexagon::L4_and_memoph_io:
2453     case Hexagon::L4_or_memoph_io:
2454     case Hexagon::L4_iadd_memopb_io:
2455     case Hexagon::L4_isub_memopb_io:
2456     case Hexagon::L4_add_memopb_io:
2457     case Hexagon::L4_sub_memopb_io:
2458     case Hexagon::L4_and_memopb_io:
2459     case Hexagon::L4_or_memopb_io:
2460     case Hexagon::L4_ior_memopb_io:
2461     case Hexagon::L4_ior_memoph_io:
2462     case Hexagon::L4_ior_memopw_io:
2463     case Hexagon::L4_iand_memopb_io:
2464     case Hexagon::L4_iand_memoph_io:
2465     case Hexagon::L4_iand_memopw_io:
2466     return true;
2467   }
2468   return false;
2469 }
2470 
2471 bool HexagonInstrInfo::isNewValue(const MachineInstr &MI) const {
2472   const uint64_t F = MI.getDesc().TSFlags;
2473   return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2474 }
2475 
2476 bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2477   const uint64_t F = get(Opcode).TSFlags;
2478   return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2479 }
2480 
2481 bool HexagonInstrInfo::isNewValueInst(const MachineInstr &MI) const {
2482   return isNewValueJump(MI) || isNewValueStore(MI);
2483 }
2484 
2485 bool HexagonInstrInfo::isNewValueJump(const MachineInstr &MI) const {
2486   return isNewValue(MI) && MI.isBranch();
2487 }
2488 
2489 bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2490   return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2491 }
2492 
2493 bool HexagonInstrInfo::isNewValueStore(const MachineInstr &MI) const {
2494   const uint64_t F = MI.getDesc().TSFlags;
2495   return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2496 }
2497 
2498 bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2499   const uint64_t F = get(Opcode).TSFlags;
2500   return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2501 }
2502 
2503 // Returns true if a particular operand is extendable for an instruction.
2504 bool HexagonInstrInfo::isOperandExtended(const MachineInstr &MI,
2505     unsigned OperandNum) const {
2506   const uint64_t F = MI.getDesc().TSFlags;
2507   return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
2508           == OperandNum;
2509 }
2510 
2511 bool HexagonInstrInfo::isPredicatedNew(const MachineInstr &MI) const {
2512   const uint64_t F = MI.getDesc().TSFlags;
2513   assert(isPredicated(MI));
2514   return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2515 }
2516 
2517 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2518   const uint64_t F = get(Opcode).TSFlags;
2519   assert(isPredicated(Opcode));
2520   return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2521 }
2522 
2523 bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr &MI) const {
2524   const uint64_t F = MI.getDesc().TSFlags;
2525   return !((F >> HexagonII::PredicatedFalsePos) &
2526            HexagonII::PredicatedFalseMask);
2527 }
2528 
2529 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2530   const uint64_t F = get(Opcode).TSFlags;
2531   // Make sure that the instruction is predicated.
  assert((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
2533   return !((F >> HexagonII::PredicatedFalsePos) &
2534            HexagonII::PredicatedFalseMask);
2535 }
2536 
2537 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2538   const uint64_t F = get(Opcode).TSFlags;
2539   return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
2540 }
2541 
2542 bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2543   const uint64_t F = get(Opcode).TSFlags;
2544   return (F >> HexagonII::PredicateLatePos) & HexagonII::PredicateLateMask;
2545 }
2546 
2547 bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2548   const uint64_t F = get(Opcode).TSFlags;
2549   assert(get(Opcode).isBranch() &&
2550          (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2551   return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2552 }
2553 
2554 bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr &MI) const {
2555   return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2556          MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2557          MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2558          MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2559 }
2560 
2561 bool HexagonInstrInfo::isSignExtendingLoad(const MachineInstr &MI) const {
2562   switch (MI.getOpcode()) {
2563   // Byte
2564   case Hexagon::L2_loadrb_io:
2565   case Hexagon::L4_loadrb_ur:
2566   case Hexagon::L4_loadrb_ap:
2567   case Hexagon::L2_loadrb_pr:
2568   case Hexagon::L2_loadrb_pbr:
2569   case Hexagon::L2_loadrb_pi:
2570   case Hexagon::L2_loadrb_pci:
2571   case Hexagon::L2_loadrb_pcr:
2572   case Hexagon::L2_loadbsw2_io:
2573   case Hexagon::L4_loadbsw2_ur:
2574   case Hexagon::L4_loadbsw2_ap:
2575   case Hexagon::L2_loadbsw2_pr:
2576   case Hexagon::L2_loadbsw2_pbr:
2577   case Hexagon::L2_loadbsw2_pi:
2578   case Hexagon::L2_loadbsw2_pci:
2579   case Hexagon::L2_loadbsw2_pcr:
2580   case Hexagon::L2_loadbsw4_io:
2581   case Hexagon::L4_loadbsw4_ur:
2582   case Hexagon::L4_loadbsw4_ap:
2583   case Hexagon::L2_loadbsw4_pr:
2584   case Hexagon::L2_loadbsw4_pbr:
2585   case Hexagon::L2_loadbsw4_pi:
2586   case Hexagon::L2_loadbsw4_pci:
2587   case Hexagon::L2_loadbsw4_pcr:
2588   case Hexagon::L4_loadrb_rr:
2589   case Hexagon::L2_ploadrbt_io:
2590   case Hexagon::L2_ploadrbt_pi:
2591   case Hexagon::L2_ploadrbf_io:
2592   case Hexagon::L2_ploadrbf_pi:
2593   case Hexagon::L2_ploadrbtnew_io:
2594   case Hexagon::L2_ploadrbfnew_io:
2595   case Hexagon::L4_ploadrbt_rr:
2596   case Hexagon::L4_ploadrbf_rr:
2597   case Hexagon::L4_ploadrbtnew_rr:
2598   case Hexagon::L4_ploadrbfnew_rr:
2599   case Hexagon::L2_ploadrbtnew_pi:
2600   case Hexagon::L2_ploadrbfnew_pi:
2601   case Hexagon::L4_ploadrbt_abs:
2602   case Hexagon::L4_ploadrbf_abs:
2603   case Hexagon::L4_ploadrbtnew_abs:
2604   case Hexagon::L4_ploadrbfnew_abs:
2605   case Hexagon::L2_loadrbgp:
2606   // Half
2607   case Hexagon::L2_loadrh_io:
2608   case Hexagon::L4_loadrh_ur:
2609   case Hexagon::L4_loadrh_ap:
2610   case Hexagon::L2_loadrh_pr:
2611   case Hexagon::L2_loadrh_pbr:
2612   case Hexagon::L2_loadrh_pi:
2613   case Hexagon::L2_loadrh_pci:
2614   case Hexagon::L2_loadrh_pcr:
2615   case Hexagon::L4_loadrh_rr:
2616   case Hexagon::L2_ploadrht_io:
2617   case Hexagon::L2_ploadrht_pi:
2618   case Hexagon::L2_ploadrhf_io:
2619   case Hexagon::L2_ploadrhf_pi:
2620   case Hexagon::L2_ploadrhtnew_io:
2621   case Hexagon::L2_ploadrhfnew_io:
2622   case Hexagon::L4_ploadrht_rr:
2623   case Hexagon::L4_ploadrhf_rr:
2624   case Hexagon::L4_ploadrhtnew_rr:
2625   case Hexagon::L4_ploadrhfnew_rr:
2626   case Hexagon::L2_ploadrhtnew_pi:
2627   case Hexagon::L2_ploadrhfnew_pi:
2628   case Hexagon::L4_ploadrht_abs:
2629   case Hexagon::L4_ploadrhf_abs:
2630   case Hexagon::L4_ploadrhtnew_abs:
2631   case Hexagon::L4_ploadrhfnew_abs:
2632   case Hexagon::L2_loadrhgp:
2633     return true;
2634   default:
2635     return false;
2636   }
2637 }
2638 
2639 bool HexagonInstrInfo::isSolo(const MachineInstr &MI) const {
2640   const uint64_t F = MI.getDesc().TSFlags;
2641   return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2642 }
2643 
2644 bool HexagonInstrInfo::isSpillPredRegOp(const MachineInstr &MI) const {
2645   switch (MI.getOpcode()) {
2646   case Hexagon::STriw_pred:
2647   case Hexagon::LDriw_pred:
2648     return true;
2649   default:
2650     return false;
2651   }
2652 }
2653 
2654 bool HexagonInstrInfo::isTailCall(const MachineInstr &MI) const {
2655   if (!MI.isBranch())
2656     return false;
2657 
2658   for (auto &Op : MI.operands())
2659     if (Op.isGlobal() || Op.isSymbol())
2660       return true;
2661   return false;
2662 }
2663 
2664 // Returns true when SU has a timing class TC1.
2665 bool HexagonInstrInfo::isTC1(const MachineInstr &MI) const {
2666   unsigned SchedClass = MI.getDesc().getSchedClass();
2667   return is_TC1(SchedClass);
2668 }
2669 
2670 bool HexagonInstrInfo::isTC2(const MachineInstr &MI) const {
2671   unsigned SchedClass = MI.getDesc().getSchedClass();
2672   return is_TC2(SchedClass);
2673 }
2674 
2675 bool HexagonInstrInfo::isTC2Early(const MachineInstr &MI) const {
2676   unsigned SchedClass = MI.getDesc().getSchedClass();
2677   return is_TC2early(SchedClass);
2678 }
2679 
2680 bool HexagonInstrInfo::isTC4x(const MachineInstr &MI) const {
2681   unsigned SchedClass = MI.getDesc().getSchedClass();
2682   return is_TC4x(SchedClass);
2683 }
2684 
2685 // Schedule this ASAP.
2686 bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr &MI1,
2687       const MachineInstr &MI2) const {
2688   if (mayBeCurLoad(MI1)) {
2689     // if (result of SU is used in Next) return true;
2690     Register DstReg = MI1.getOperand(0).getReg();
2691     int N = MI2.getNumOperands();
2692     for (int I = 0; I < N; I++)
2693       if (MI2.getOperand(I).isReg() && DstReg == MI2.getOperand(I).getReg())
2694         return true;
2695   }
2696   if (mayBeNewStore(MI2))
2697     if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)
2698       if (MI1.getOperand(0).isReg() && MI2.getOperand(3).isReg() &&
2699           MI1.getOperand(0).getReg() == MI2.getOperand(3).getReg())
2700         return true;
2701   return false;
2702 }
2703 
2704 bool HexagonInstrInfo::isHVXVec(const MachineInstr &MI) const {
2705   const uint64_t V = getType(MI);
2706   return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
2707 }
2708 
// Check if the Offset is a valid auto-increment immediate for the given
// load/store type.
2710 bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, int Offset) const {
2711   int Size = VT.getSizeInBits() / 8;
2712   if (Offset % Size != 0)
2713     return false;
2714   int Count = Offset / Size;
2715 
2716   switch (VT.getSimpleVT().SimpleTy) {
2717     // For scalars the auto-inc is s4
2718     case MVT::i8:
2719     case MVT::i16:
2720     case MVT::i32:
2721     case MVT::i64:
2722     case MVT::f32:
2723     case MVT::f64:
2724     case MVT::v2i16:
2725     case MVT::v2i32:
2726     case MVT::v4i8:
2727     case MVT::v4i16:
2728     case MVT::v8i8:
2729       return isInt<4>(Count);
2730     // For HVX vectors the auto-inc is s3
2731     case MVT::v64i8:
2732     case MVT::v32i16:
2733     case MVT::v16i32:
2734     case MVT::v8i64:
2735     case MVT::v128i8:
2736     case MVT::v64i16:
2737     case MVT::v32i32:
2738     case MVT::v16i64:
2739       return isInt<3>(Count);
2740     default:
2741       break;
2742   }
2743 
2744   llvm_unreachable("Not an valid type!");
2745 }
2746 
2747 bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
2748       const TargetRegisterInfo *TRI, bool Extend) const {
  // This function checks whether "Offset" is in the valid range for the
  // given "Opcode". If it is not, an "A2_addi" is inserted to compute the
  // final address, so the function assumes that "Offset" has the correct
  // alignment.
  // We used to assert if the offset was not properly aligned; however,
  // there are cases where a misaligned pointer recast can cause this
  // problem, and we need to allow for it. The front end warns of such
  // misalignments with respect to the load size.
2757   switch (Opcode) {
2758   case Hexagon::PS_vstorerq_ai:
2759   case Hexagon::PS_vstorerv_ai:
2760   case Hexagon::PS_vstorerw_ai:
2761   case Hexagon::PS_vstorerw_nt_ai:
2762   case Hexagon::PS_vloadrq_ai:
2763   case Hexagon::PS_vloadrv_ai:
2764   case Hexagon::PS_vloadrw_ai:
2765   case Hexagon::PS_vloadrw_nt_ai:
2766   case Hexagon::V6_vL32b_ai:
2767   case Hexagon::V6_vS32b_ai:
2768   case Hexagon::V6_vS32b_qpred_ai:
2769   case Hexagon::V6_vS32b_nqpred_ai:
2770   case Hexagon::V6_vL32b_nt_ai:
2771   case Hexagon::V6_vS32b_nt_ai:
2772   case Hexagon::V6_vL32Ub_ai:
2773   case Hexagon::V6_vS32Ub_ai:
2774   case Hexagon::V6_vgathermh_pseudo:
2775   case Hexagon::V6_vgathermw_pseudo:
2776   case Hexagon::V6_vgathermhw_pseudo:
2777   case Hexagon::V6_vgathermhq_pseudo:
2778   case Hexagon::V6_vgathermwq_pseudo:
2779   case Hexagon::V6_vgathermhwq_pseudo: {
2780     unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
2781     assert(isPowerOf2_32(VectorSize));
2782     if (Offset & (VectorSize-1))
2783       return false;
2784     return isInt<4>(Offset >> Log2_32(VectorSize));
2785   }
2786 
2787   case Hexagon::J2_loop0i:
2788   case Hexagon::J2_loop1i:
2789     return isUInt<10>(Offset);
2790 
2791   case Hexagon::S4_storeirb_io:
2792   case Hexagon::S4_storeirbt_io:
2793   case Hexagon::S4_storeirbf_io:
2794     return isUInt<6>(Offset);
2795 
2796   case Hexagon::S4_storeirh_io:
2797   case Hexagon::S4_storeirht_io:
2798   case Hexagon::S4_storeirhf_io:
2799     return isShiftedUInt<6,1>(Offset);
2800 
2801   case Hexagon::S4_storeiri_io:
2802   case Hexagon::S4_storeirit_io:
2803   case Hexagon::S4_storeirif_io:
2804     return isShiftedUInt<6,2>(Offset);
2805   // Handle these two compare instructions that are not extendable.
2806   case Hexagon::A4_cmpbeqi:
2807     return isUInt<8>(Offset);
2808   case Hexagon::A4_cmpbgti:
2809     return isInt<8>(Offset);
2810   }
2811 
2812   if (Extend)
2813     return true;
2814 
2815   switch (Opcode) {
2816   case Hexagon::L2_loadri_io:
2817   case Hexagon::S2_storeri_io:
2818     return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
2819       (Offset <= Hexagon_MEMW_OFFSET_MAX);
2820 
2821   case Hexagon::L2_loadrd_io:
2822   case Hexagon::S2_storerd_io:
2823     return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
2824       (Offset <= Hexagon_MEMD_OFFSET_MAX);
2825 
2826   case Hexagon::L2_loadrh_io:
2827   case Hexagon::L2_loadruh_io:
2828   case Hexagon::S2_storerh_io:
2829   case Hexagon::S2_storerf_io:
2830     return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
2831       (Offset <= Hexagon_MEMH_OFFSET_MAX);
2832 
2833   case Hexagon::L2_loadrb_io:
2834   case Hexagon::L2_loadrub_io:
2835   case Hexagon::S2_storerb_io:
2836     return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
2837       (Offset <= Hexagon_MEMB_OFFSET_MAX);
2838 
2839   case Hexagon::A2_addi:
2840     return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
2841       (Offset <= Hexagon_ADDI_OFFSET_MAX);
2842 
2843   case Hexagon::L4_iadd_memopw_io:
2844   case Hexagon::L4_isub_memopw_io:
2845   case Hexagon::L4_add_memopw_io:
2846   case Hexagon::L4_sub_memopw_io:
2847   case Hexagon::L4_iand_memopw_io:
2848   case Hexagon::L4_ior_memopw_io:
2849   case Hexagon::L4_and_memopw_io:
2850   case Hexagon::L4_or_memopw_io:
2851     return (0 <= Offset && Offset <= 255);
2852 
2853   case Hexagon::L4_iadd_memoph_io:
2854   case Hexagon::L4_isub_memoph_io:
2855   case Hexagon::L4_add_memoph_io:
2856   case Hexagon::L4_sub_memoph_io:
2857   case Hexagon::L4_iand_memoph_io:
2858   case Hexagon::L4_ior_memoph_io:
2859   case Hexagon::L4_and_memoph_io:
2860   case Hexagon::L4_or_memoph_io:
2861     return (0 <= Offset && Offset <= 127);
2862 
2863   case Hexagon::L4_iadd_memopb_io:
2864   case Hexagon::L4_isub_memopb_io:
2865   case Hexagon::L4_add_memopb_io:
2866   case Hexagon::L4_sub_memopb_io:
2867   case Hexagon::L4_iand_memopb_io:
2868   case Hexagon::L4_ior_memopb_io:
2869   case Hexagon::L4_and_memopb_io:
2870   case Hexagon::L4_or_memopb_io:
2871     return (0 <= Offset && Offset <= 63);
2872 
2873   // LDriw_xxx and STriw_xxx are pseudo operations, so they have to accept an
2874   // offset of any size. A later pass knows how to handle it.
2875   case Hexagon::STriw_pred:
2876   case Hexagon::LDriw_pred:
2877   case Hexagon::STriw_ctr:
2878   case Hexagon::LDriw_ctr:
2879     return true;
2880 
2881   case Hexagon::PS_fi:
2882   case Hexagon::PS_fia:
2883   case Hexagon::INLINEASM:
2884     return true;
2885 
2886   case Hexagon::L2_ploadrbt_io:
2887   case Hexagon::L2_ploadrbf_io:
2888   case Hexagon::L2_ploadrubt_io:
2889   case Hexagon::L2_ploadrubf_io:
2890   case Hexagon::S2_pstorerbt_io:
2891   case Hexagon::S2_pstorerbf_io:
2892     return isUInt<6>(Offset);
2893 
2894   case Hexagon::L2_ploadrht_io:
2895   case Hexagon::L2_ploadrhf_io:
2896   case Hexagon::L2_ploadruht_io:
2897   case Hexagon::L2_ploadruhf_io:
2898   case Hexagon::S2_pstorerht_io:
2899   case Hexagon::S2_pstorerhf_io:
2900     return isShiftedUInt<6,1>(Offset);
2901 
2902   case Hexagon::L2_ploadrit_io:
2903   case Hexagon::L2_ploadrif_io:
2904   case Hexagon::S2_pstorerit_io:
2905   case Hexagon::S2_pstorerif_io:
2906     return isShiftedUInt<6,2>(Offset);
2907 
2908   case Hexagon::L2_ploadrdt_io:
2909   case Hexagon::L2_ploadrdf_io:
2910   case Hexagon::S2_pstorerdt_io:
2911   case Hexagon::S2_pstorerdf_io:
2912     return isShiftedUInt<6,3>(Offset);
2913 
2914   case Hexagon::L2_loadbsw2_io:
2915   case Hexagon::L2_loadbzw2_io:
2916     return isShiftedInt<11,1>(Offset);
2917 
2918   case Hexagon::L2_loadbsw4_io:
2919   case Hexagon::L2_loadbzw4_io:
2920     return isShiftedInt<11,2>(Offset);
2921   } // switch
2922 
2923   dbgs() << "Failed Opcode is : " << Opcode << " (" << getName(Opcode)
2924          << ")\n";
2925   llvm_unreachable("No offset range is defined for this opcode. "
2926                    "Please define it in the above switch statement!");
2927 }
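// A minimal caller-side sketch of how this predicate is typically consulted
// (the local names below are illustrative assumptions, not code from this
// file):
//   int Off = ...;  // candidate immediate offset
//   if (HII->isValidOffset(Hexagon::L2_loadri_io, Off, &HRI, /*Extend=*/false))
//     ; // fold Off directly into the memw(Rs+#Off) form
//   else
//     ; // materialize the address first (e.g. with A2_addi), then use offset 0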
2928 
2929 bool HexagonInstrInfo::isVecAcc(const MachineInstr &MI) const {
2930   return isHVXVec(MI) && isAccumulator(MI);
2931 }
2932 
2933 bool HexagonInstrInfo::isVecALU(const MachineInstr &MI) const {
2934   const uint64_t F = get(MI.getOpcode()).TSFlags;
2935   const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2936   return
2937     V == HexagonII::TypeCVI_VA         ||
2938     V == HexagonII::TypeCVI_VA_DV;
2939 }
2940 
2941 bool HexagonInstrInfo::isVecUsableNextPacket(const MachineInstr &ProdMI,
2942       const MachineInstr &ConsMI) const {
2943   if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2944     return true;
2945 
2946   if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2947     return true;
2948 
2949   if (mayBeNewStore(ConsMI))
2950     return true;
2951 
2952   return false;
2953 }
2954 
2955 bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
2956   switch (MI.getOpcode()) {
2957   // Byte
2958   case Hexagon::L2_loadrub_io:
2959   case Hexagon::L4_loadrub_ur:
2960   case Hexagon::L4_loadrub_ap:
2961   case Hexagon::L2_loadrub_pr:
2962   case Hexagon::L2_loadrub_pbr:
2963   case Hexagon::L2_loadrub_pi:
2964   case Hexagon::L2_loadrub_pci:
2965   case Hexagon::L2_loadrub_pcr:
2966   case Hexagon::L2_loadbzw2_io:
2967   case Hexagon::L4_loadbzw2_ur:
2968   case Hexagon::L4_loadbzw2_ap:
2969   case Hexagon::L2_loadbzw2_pr:
2970   case Hexagon::L2_loadbzw2_pbr:
2971   case Hexagon::L2_loadbzw2_pi:
2972   case Hexagon::L2_loadbzw2_pci:
2973   case Hexagon::L2_loadbzw2_pcr:
2974   case Hexagon::L2_loadbzw4_io:
2975   case Hexagon::L4_loadbzw4_ur:
2976   case Hexagon::L4_loadbzw4_ap:
2977   case Hexagon::L2_loadbzw4_pr:
2978   case Hexagon::L2_loadbzw4_pbr:
2979   case Hexagon::L2_loadbzw4_pi:
2980   case Hexagon::L2_loadbzw4_pci:
2981   case Hexagon::L2_loadbzw4_pcr:
2982   case Hexagon::L4_loadrub_rr:
2983   case Hexagon::L2_ploadrubt_io:
2984   case Hexagon::L2_ploadrubt_pi:
2985   case Hexagon::L2_ploadrubf_io:
2986   case Hexagon::L2_ploadrubf_pi:
2987   case Hexagon::L2_ploadrubtnew_io:
2988   case Hexagon::L2_ploadrubfnew_io:
2989   case Hexagon::L4_ploadrubt_rr:
2990   case Hexagon::L4_ploadrubf_rr:
2991   case Hexagon::L4_ploadrubtnew_rr:
2992   case Hexagon::L4_ploadrubfnew_rr:
2993   case Hexagon::L2_ploadrubtnew_pi:
2994   case Hexagon::L2_ploadrubfnew_pi:
2995   case Hexagon::L4_ploadrubt_abs:
2996   case Hexagon::L4_ploadrubf_abs:
2997   case Hexagon::L4_ploadrubtnew_abs:
2998   case Hexagon::L4_ploadrubfnew_abs:
2999   case Hexagon::L2_loadrubgp:
3000   // Half
3001   case Hexagon::L2_loadruh_io:
3002   case Hexagon::L4_loadruh_ur:
3003   case Hexagon::L4_loadruh_ap:
3004   case Hexagon::L2_loadruh_pr:
3005   case Hexagon::L2_loadruh_pbr:
3006   case Hexagon::L2_loadruh_pi:
3007   case Hexagon::L2_loadruh_pci:
3008   case Hexagon::L2_loadruh_pcr:
3009   case Hexagon::L4_loadruh_rr:
3010   case Hexagon::L2_ploadruht_io:
3011   case Hexagon::L2_ploadruht_pi:
3012   case Hexagon::L2_ploadruhf_io:
3013   case Hexagon::L2_ploadruhf_pi:
3014   case Hexagon::L2_ploadruhtnew_io:
3015   case Hexagon::L2_ploadruhfnew_io:
3016   case Hexagon::L4_ploadruht_rr:
3017   case Hexagon::L4_ploadruhf_rr:
3018   case Hexagon::L4_ploadruhtnew_rr:
3019   case Hexagon::L4_ploadruhfnew_rr:
3020   case Hexagon::L2_ploadruhtnew_pi:
3021   case Hexagon::L2_ploadruhfnew_pi:
3022   case Hexagon::L4_ploadruht_abs:
3023   case Hexagon::L4_ploadruhf_abs:
3024   case Hexagon::L4_ploadruhtnew_abs:
3025   case Hexagon::L4_ploadruhfnew_abs:
3026   case Hexagon::L2_loadruhgp:
3027     return true;
3028   default:
3029     return false;
3030   }
3031 }
3032 
3033 // Add latency to instruction.
3034 bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
3035       const MachineInstr &MI2) const {
3036   if (isHVXVec(MI1) && isHVXVec(MI2))
3037     if (!isVecUsableNextPacket(MI1, MI2))
3038       return true;
3039   return false;
3040 }
3041 
3042 /// Get the base register and byte offset of a load/store instr.
3043 bool HexagonInstrInfo::getMemOperandsWithOffsetWidth(
3044     const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
3045     int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
3046     const TargetRegisterInfo *TRI) const {
3047   OffsetIsScalable = false;
3048   const MachineOperand *BaseOp = getBaseAndOffset(LdSt, Offset, Width);
3049   if (!BaseOp || !BaseOp->isReg())
3050     return false;
3051   BaseOps.push_back(BaseOp);
3052   return true;
3053 }
3054 
3055 /// Can these instructions execute at the same time in a bundle?
3056 bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr &First,
3057       const MachineInstr &Second) const {
3058   if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) {
3059     const MachineOperand &Op = Second.getOperand(0);
3060     if (Op.isReg() && Op.isUse() && Op.getReg() == Hexagon::R29)
3061       return true;
3062   }
3063   if (DisableNVSchedule)
3064     return false;
3065   if (mayBeNewStore(Second)) {
3066     // Make sure the definition of the first instruction is the value being
3067     // stored.
3068     const MachineOperand &Stored =
3069       Second.getOperand(Second.getNumOperands() - 1);
3070     if (!Stored.isReg())
3071       return false;
3072     for (unsigned i = 0, e = First.getNumOperands(); i < e; ++i) {
3073       const MachineOperand &Op = First.getOperand(i);
3074       if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
3075         return true;
3076     }
3077   }
3078   return false;
3079 }
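// Two bundle shapes accepted by the checks above (illustrative only):
//   { allocframe(#8); memw(r29+#4) = r1 }      // store through r29 paired
//                                              // with S2_allocframe
//   { r2 = add(r3,r4); memw(r0+#0) = r2.new }  // producer feeding a
//                                              // new-value store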
3080 
3081 bool HexagonInstrInfo::doesNotReturn(const MachineInstr &CallMI) const {
3082   unsigned Opc = CallMI.getOpcode();
3083   return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;
3084 }
3085 
3086 bool HexagonInstrInfo::hasEHLabel(const MachineBasicBlock *B) const {
3087   for (auto &I : *B)
3088     if (I.isEHLabel())
3089       return true;
3090   return false;
3091 }
3092 
3093 // Returns true if an instruction can be converted into a non-extended
3094 // equivalent instruction.
3095 bool HexagonInstrInfo::hasNonExtEquivalent(const MachineInstr &MI) const {
3096   short NonExtOpcode;
3097   // Check if the instruction has a register form that uses register in place
3098   // Check if the instruction has a register form that uses a register in place
3099   // of the extended operand; if so, it already has a non-extended equivalent.
3100     return true;
3101 
3102   if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
3103     // Check addressing mode and retrieve non-ext equivalent instruction.
3104 
3105     switch (getAddrMode(MI)) {
3106     case HexagonII::Absolute:
3107       // Load/store with absolute addressing mode can be converted into
3108       // base+offset mode.
3109       NonExtOpcode = Hexagon::changeAddrMode_abs_io(MI.getOpcode());
3110       break;
3111     case HexagonII::BaseImmOffset:
3112       // Load/store with base+offset addressing mode can be converted into
3113     // base+register offset addressing mode. However, the left-shift operand
3114     // should be set to 0.
3115       NonExtOpcode = Hexagon::changeAddrMode_io_rr(MI.getOpcode());
3116       break;
3117     case HexagonII::BaseLongOffset:
3118       NonExtOpcode = Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
3119       break;
3120     default:
3121       return false;
3122     }
3123     if (NonExtOpcode < 0)
3124       return false;
3125     return true;
3126   }
3127   return false;
3128 }
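// For example (a sketch of the mappings above): an absolute-addressed load
// such as "r0 = memw(##sym)" has a base+offset counterpart reachable through
// changeAddrMode_abs_io, so it reports a non-extended equivalent, whereas an
// opcode with no such mapping (or a non-memory extended instruction without
// a register form) does not.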
3129 
3130 bool HexagonInstrInfo::hasPseudoInstrPair(const MachineInstr &MI) const {
3131   return Hexagon::getRealHWInstr(MI.getOpcode(),
3132                                  Hexagon::InstrType_Pseudo) >= 0;
3133 }
3134 
3135 bool HexagonInstrInfo::hasUncondBranch(const MachineBasicBlock *B)
3136       const {
3137   MachineBasicBlock::const_iterator I = B->getFirstTerminator(), E = B->end();
3138   while (I != E) {
3139     if (I->isBarrier())
3140       return true;
3141     ++I;
3142   }
3143   return false;
3144 }
3145 
3146 // Returns true if a load instruction can be promoted to a .cur load.
3147 bool HexagonInstrInfo::mayBeCurLoad(const MachineInstr &MI) const {
3148   const uint64_t F = MI.getDesc().TSFlags;
3149   return ((F >> HexagonII::mayCVLoadPos) & HexagonII::mayCVLoadMask) &&
3150          Subtarget.hasV60Ops();
3151 }
3152 
3153 // Returns true if a store instruction can be promoted to a new-value store.
3154 bool HexagonInstrInfo::mayBeNewStore(const MachineInstr &MI) const {
3155   if (MI.mayStore() && !Subtarget.useNewValueStores())
3156     return false;
3157 
3158   const uint64_t F = MI.getDesc().TSFlags;
3159   return (F >> HexagonII::mayNVStorePos) & HexagonII::mayNVStoreMask;
3160 }
3161 
3162 bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
3163       const MachineInstr &ConsMI) const {
3164   // There is no stall when ProdMI is not a V60 vector.
3165   if (!isHVXVec(ProdMI))
3166     return false;
3167 
3168   // There is no stall when ProdMI and ConsMI are not dependent.
3169   if (!isDependent(ProdMI, ConsMI))
3170     return false;
3171 
3172   // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
3173   // are scheduled in consecutive packets.
3174   if (isVecUsableNextPacket(ProdMI, ConsMI))
3175     return false;
3176 
3177   return true;
3178 }
3179 
3180 bool HexagonInstrInfo::producesStall(const MachineInstr &MI,
3181       MachineBasicBlock::const_instr_iterator BII) const {
3182   // There is no stall when MI is not a V60 vector.
3183   if (!isHVXVec(MI))
3184     return false;
3185 
3186   MachineBasicBlock::const_instr_iterator MII = BII;
3187   MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
3188 
3189   if (!MII->isBundle())
3190     return producesStall(*MII, MI);
3191 
3192   for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
3193     const MachineInstr &J = *MII;
3194     if (producesStall(J, MI))
3195       return true;
3196   }
3197   return false;
3198 }
3199 
3200 bool HexagonInstrInfo::predCanBeUsedAsDotNew(const MachineInstr &MI,
3201       Register PredReg) const {
3202   for (const MachineOperand &MO : MI.operands()) {
3203     // Predicate register must be explicitly defined.
3204     if (MO.isRegMask() && MO.clobbersPhysReg(PredReg))
3205       return false;
3206     if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
3207       return false;
3208   }
3209 
3210   // Instructions that produce a late predicate cannot be used as sources of
3211   // dot-new.
3212   switch (MI.getOpcode()) {
3213     case Hexagon::A4_addp_c:
3214     case Hexagon::A4_subp_c:
3215     case Hexagon::A4_tlbmatch:
3216     case Hexagon::A5_ACS:
3217     case Hexagon::F2_sfinvsqrta:
3218     case Hexagon::F2_sfrecipa:
3219     case Hexagon::J2_endloop0:
3220     case Hexagon::J2_endloop01:
3221     case Hexagon::J2_ploop1si:
3222     case Hexagon::J2_ploop1sr:
3223     case Hexagon::J2_ploop2si:
3224     case Hexagon::J2_ploop2sr:
3225     case Hexagon::J2_ploop3si:
3226     case Hexagon::J2_ploop3sr:
3227     case Hexagon::S2_cabacdecbin:
3228     case Hexagon::S2_storew_locked:
3229     case Hexagon::S4_stored_locked:
3230       return false;
3231   }
3232   return true;
3233 }
3234 
3235 bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
3236   return Opcode == Hexagon::J2_jumpt      ||
3237          Opcode == Hexagon::J2_jumptpt    ||
3238          Opcode == Hexagon::J2_jumpf      ||
3239          Opcode == Hexagon::J2_jumpfpt    ||
3240          Opcode == Hexagon::J2_jumptnew   ||
3241          Opcode == Hexagon::J2_jumpfnew   ||
3242          Opcode == Hexagon::J2_jumptnewpt ||
3243          Opcode == Hexagon::J2_jumpfnewpt;
3244 }
3245 
3246 bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
3247   if (Cond.empty() || !isPredicated(Cond[0].getImm()))
3248     return false;
3249   return !isPredicatedTrue(Cond[0].getImm());
3250 }
3251 
3252 unsigned HexagonInstrInfo::getAddrMode(const MachineInstr &MI) const {
3253   const uint64_t F = MI.getDesc().TSFlags;
3254   return (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
3255 }
3256 
3257 // Returns the base register in a memory access (load/store). The offset is
3258 // returned in Offset and the access size is returned in AccessSize.
3259 // If the base operand has a subregister or the offset field does not contain
3260 // an immediate value, return nullptr.
3261 MachineOperand *HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI,
3262                                                    int64_t &Offset,
3263                                                    unsigned &AccessSize) const {
3264   // Return if it is not a base+offset type instruction or a MemOp.
3265   if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
3266       getAddrMode(MI) != HexagonII::BaseLongOffset &&
3267       !isMemOp(MI) && !isPostIncrement(MI))
3268     return nullptr;
3269 
3270   AccessSize = getMemAccessSize(MI);
3271 
3272   unsigned BasePos = 0, OffsetPos = 0;
3273   if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
3274     return nullptr;
3275 
3276   // Post increment updates its EA after the mem access,
3277   // so we need to treat its offset as zero.
3278   if (isPostIncrement(MI)) {
3279     Offset = 0;
3280   } else {
3281     const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
3282     if (!OffsetOp.isImm())
3283       return nullptr;
3284     Offset = OffsetOp.getImm();
3285   }
3286 
3287   const MachineOperand &BaseOp = MI.getOperand(BasePos);
3288   if (BaseOp.getSubReg() != 0)
3289     return nullptr;
3290   return &const_cast<MachineOperand&>(BaseOp);
3291 }
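// A minimal caller-side sketch (the local names are assumptions):
//   int64_t Off;
//   unsigned AccessSize;
//   if (const MachineOperand *Base = HII->getBaseAndOffset(MI, Off, AccessSize))
//     LLVM_DEBUG(dbgs() << "base " << *Base << " offset " << Off
//                       << " size " << AccessSize << "\n");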
3292 
3293 /// Return the position of the base and offset operands for this instruction.
3294 bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr &MI,
3295       unsigned &BasePos, unsigned &OffsetPos) const {
3296   if (!isAddrModeWithOffset(MI) && !isPostIncrement(MI))
3297     return false;
3298 
3299   // Deal with memops first.
3300   if (isMemOp(MI)) {
3301     BasePos = 0;
3302     OffsetPos = 1;
3303   } else if (MI.mayStore()) {
3304     BasePos = 0;
3305     OffsetPos = 1;
3306   } else if (MI.mayLoad()) {
3307     BasePos = 1;
3308     OffsetPos = 2;
3309   } else
3310     return false;
3311 
3312   if (isPredicated(MI)) {
3313     BasePos++;
3314     OffsetPos++;
3315   }
3316   if (isPostIncrement(MI)) {
3317     BasePos++;
3318     OffsetPos++;
3319   }
3320 
3321   if (!MI.getOperand(BasePos).isReg() || !MI.getOperand(OffsetPos).isImm())
3322     return false;
3323 
3324   return true;
3325 }
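// Worked example of the operand layout handled above: for a plain
// base+immediate store (memw(Rs+#u) = Rt) the base is operand 0 and the
// offset operand 1; a leading predicate operand shifts both positions by
// one, and a post-increment destination shifts them by one more.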
3326 
3327 // Returns the branching instructions in reverse order of their occurrence.
3328 // e.g. jump_t t1 (i1)
3329 // jump t2        (i2)
3330 // Jumpers = {i2, i1}
3331 SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
3332       MachineBasicBlock& MBB) const {
3333   SmallVector<MachineInstr*, 2> Jumpers;
3334   // If the block has no terminators, it just falls into the block after it.
3335   MachineBasicBlock::instr_iterator I = MBB.instr_end();
3336   if (I == MBB.instr_begin())
3337     return Jumpers;
3338 
3339   // A basic block may look like this:
3340   //
3341   //  [   insn
3342   //     EH_LABEL
3343   //      insn
3344   //      insn
3345   //      insn
3346   //     EH_LABEL
3347   //      insn     ]
3348   //
3349   // It has two successors but does not have a terminator;
3350   // we don't know how to handle such a block.
3351   do {
3352     --I;
3353     if (I->isEHLabel())
3354       return Jumpers;
3355   } while (I != MBB.instr_begin());
3356 
3357   I = MBB.instr_end();
3358   --I;
3359 
3360   while (I->isDebugInstr()) {
3361     if (I == MBB.instr_begin())
3362       return Jumpers;
3363     --I;
3364   }
3365   if (!isUnpredicatedTerminator(*I))
3366     return Jumpers;
3367 
3368   // Get the last instruction in the block.
3369   MachineInstr *LastInst = &*I;
3370   Jumpers.push_back(LastInst);
3371   MachineInstr *SecondLastInst = nullptr;
3372   // Find one more terminator if present.
3373   do {
3374     if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
3375       if (!SecondLastInst) {
3376         SecondLastInst = &*I;
3377         Jumpers.push_back(SecondLastInst);
3378       } else // This is a third branch.
3379         return Jumpers;
3380     }
3381     if (I == MBB.instr_begin())
3382       break;
3383     --I;
3384   } while (true);
3385   return Jumpers;
3386 }
3387 
3388 // Returns the operand index of the extendable operand of the instruction.
3389 unsigned HexagonInstrInfo::getCExtOpNum(const MachineInstr &MI) const {
3390   const uint64_t F = MI.getDesc().TSFlags;
3391   return (F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask;
3392 }
3393 
3394 // See if instruction could potentially be a duplex candidate.
3395 // If so, return its group. Zero otherwise.
3396 HexagonII::CompoundGroup HexagonInstrInfo::getCompoundCandidateGroup(
3397       const MachineInstr &MI) const {
3398   Register DstReg, SrcReg, Src1Reg, Src2Reg;
3399 
3400   switch (MI.getOpcode()) {
3401   default:
3402     return HexagonII::HCG_None;
3403   //
3404   // Compound pairs.
3405   // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
3406   // "Rd16=#U6 ; jump #r9:2"
3407   // "Rd16=Rs16 ; jump #r9:2"
3408   //
3409   case Hexagon::C2_cmpeq:
3410   case Hexagon::C2_cmpgt:
3411   case Hexagon::C2_cmpgtu:
3412     DstReg = MI.getOperand(0).getReg();
3413     Src1Reg = MI.getOperand(1).getReg();
3414     Src2Reg = MI.getOperand(2).getReg();
3415     if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3416         (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3417         isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
3418       return HexagonII::HCG_A;
3419     break;
3420   case Hexagon::C2_cmpeqi:
3421   case Hexagon::C2_cmpgti:
3422   case Hexagon::C2_cmpgtui:
3423     // P0 = cmp.eq(Rs,#u2)
3424     DstReg = MI.getOperand(0).getReg();
3425     SrcReg = MI.getOperand(1).getReg();
3426     if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3427         (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3428         isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3429         ((isUInt<5>(MI.getOperand(2).getImm())) ||
3430          (MI.getOperand(2).getImm() == -1)))
3431       return HexagonII::HCG_A;
3432     break;
3433   case Hexagon::A2_tfr:
3434     // Rd = Rs
3435     DstReg = MI.getOperand(0).getReg();
3436     SrcReg = MI.getOperand(1).getReg();
3437     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3438       return HexagonII::HCG_A;
3439     break;
3440   case Hexagon::A2_tfrsi:
3441     // Rd = #u6
3442     // Do not test for the #u6 size since the constant is getting extended
3443     // regardless and a compound could still be formed.
3444     DstReg = MI.getOperand(0).getReg();
3445     if (isIntRegForSubInst(DstReg))
3446       return HexagonII::HCG_A;
3447     break;
3448   case Hexagon::S2_tstbit_i:
3449     DstReg = MI.getOperand(0).getReg();
3450     Src1Reg = MI.getOperand(1).getReg();
3451     if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3452         (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3453         MI.getOperand(2).isImm() &&
3454         isIntRegForSubInst(Src1Reg) && (MI.getOperand(2).getImm() == 0))
3455       return HexagonII::HCG_A;
3456     break;
3457   // The fact that the .new form is used pretty much guarantees
3458   // that the predicate register will match. Nevertheless,
3459   // there could be some false positives without additional
3460   // checking.
3461   case Hexagon::J2_jumptnew:
3462   case Hexagon::J2_jumpfnew:
3463   case Hexagon::J2_jumptnewpt:
3464   case Hexagon::J2_jumpfnewpt:
3465     Src1Reg = MI.getOperand(0).getReg();
3466     if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3467         (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3468       return HexagonII::HCG_B;
3469     break;
3470   // Transfer and jump:
3471   // Rd=#U6 ; jump #r9:2
3472   // Rd=Rs ; jump #r9:2
3473   // Do not test for jump range here.
3474   case Hexagon::J2_jump:
3475   case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3476   case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3477     return HexagonII::HCG_C;
3478   }
3479 
3480   return HexagonII::HCG_None;
3481 }
3482 
3483 // Returns -1 when there is no opcode found.
3484 unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr &GA,
3485       const MachineInstr &GB) const {
3486   assert(getCompoundCandidateGroup(GA) == HexagonII::HCG_A);
3487   assert(getCompoundCandidateGroup(GB) == HexagonII::HCG_B);
3488   if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
3489       (GB.getOpcode() != Hexagon::J2_jumptnew))
3490     return -1u;
3491   Register DestReg = GA.getOperand(0).getReg();
3492   if (!GB.readsRegister(DestReg))
3493     return -1u;
3494   if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1)
3495     return -1u;
3496   // The value compared against must be either u5 or -1.
3497   const MachineOperand &CmpOp = GA.getOperand(2);
3498   if (!CmpOp.isImm())
3499     return -1u;
3500   int V = CmpOp.getImm();
3501   if (V == -1)
3502     return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqn1_tp0_jump_nt
3503                                   : Hexagon::J4_cmpeqn1_tp1_jump_nt;
3504   if (!isUInt<5>(V))
3505     return -1u;
3506   return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqi_tp0_jump_nt
3507                                 : Hexagon::J4_cmpeqi_tp1_jump_nt;
3508 }
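// For example, under the checks above the pair
//   p0 = cmp.eq(r7,#3) ; if (p0.new) jump:nt target
// compounds to J4_cmpeqi_tp0_jump_nt; comparing against #-1 selects the
// J4_cmpeqn1_* variant, and using p1 instead of p0 selects the *_tp1_*
// variant.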
3509 
3510 // Returns -1 if there is no opcode found.
3511 int HexagonInstrInfo::getDuplexOpcode(const MachineInstr &MI,
3512                                       bool ForBigCore) const {
3513   // Static table to switch the opcodes across Tiny Core and Big Core.
3514   // dup_ opcodes are Big core opcodes.
3515   // NOTE: There are special instructions that need to be handled later:
3516   // L4_return* instructions will only occupy SLOT0 (on big core too).
3517   // PS_jmpret - this pseudo translates to J2_jumpr, which occupies only SLOT2.
3518   // The compiler needs to map the root instruction to L6_return_map_to_raw,
3519   // which can go in any slot.
3520   static const std::map<unsigned, unsigned> DupMap = {
3521       {Hexagon::A2_add, Hexagon::dup_A2_add},
3522       {Hexagon::A2_addi, Hexagon::dup_A2_addi},
3523       {Hexagon::A2_andir, Hexagon::dup_A2_andir},
3524       {Hexagon::A2_combineii, Hexagon::dup_A2_combineii},
3525       {Hexagon::A2_sxtb, Hexagon::dup_A2_sxtb},
3526       {Hexagon::A2_sxth, Hexagon::dup_A2_sxth},
3527       {Hexagon::A2_tfr, Hexagon::dup_A2_tfr},
3528       {Hexagon::A2_tfrsi, Hexagon::dup_A2_tfrsi},
3529       {Hexagon::A2_zxtb, Hexagon::dup_A2_zxtb},
3530       {Hexagon::A2_zxth, Hexagon::dup_A2_zxth},
3531       {Hexagon::A4_combineii, Hexagon::dup_A4_combineii},
3532       {Hexagon::A4_combineir, Hexagon::dup_A4_combineir},
3533       {Hexagon::A4_combineri, Hexagon::dup_A4_combineri},
3534       {Hexagon::C2_cmoveif, Hexagon::dup_C2_cmoveif},
3535       {Hexagon::C2_cmoveit, Hexagon::dup_C2_cmoveit},
3536       {Hexagon::C2_cmovenewif, Hexagon::dup_C2_cmovenewif},
3537       {Hexagon::C2_cmovenewit, Hexagon::dup_C2_cmovenewit},
3538       {Hexagon::C2_cmpeqi, Hexagon::dup_C2_cmpeqi},
3539       {Hexagon::L2_deallocframe, Hexagon::dup_L2_deallocframe},
3540       {Hexagon::L2_loadrb_io, Hexagon::dup_L2_loadrb_io},
3541       {Hexagon::L2_loadrd_io, Hexagon::dup_L2_loadrd_io},
3542       {Hexagon::L2_loadrh_io, Hexagon::dup_L2_loadrh_io},
3543       {Hexagon::L2_loadri_io, Hexagon::dup_L2_loadri_io},
3544       {Hexagon::L2_loadrub_io, Hexagon::dup_L2_loadrub_io},
3545       {Hexagon::L2_loadruh_io, Hexagon::dup_L2_loadruh_io},
3546       {Hexagon::S2_allocframe, Hexagon::dup_S2_allocframe},
3547       {Hexagon::S2_storerb_io, Hexagon::dup_S2_storerb_io},
3548       {Hexagon::S2_storerd_io, Hexagon::dup_S2_storerd_io},
3549       {Hexagon::S2_storerh_io, Hexagon::dup_S2_storerh_io},
3550       {Hexagon::S2_storeri_io, Hexagon::dup_S2_storeri_io},
3551       {Hexagon::S4_storeirb_io, Hexagon::dup_S4_storeirb_io},
3552       {Hexagon::S4_storeiri_io, Hexagon::dup_S4_storeiri_io},
3553   };
3554   unsigned OpNum = MI.getOpcode();
3555   // Conversion to Big core.
3556   if (ForBigCore) {
3557     auto Iter = DupMap.find(OpNum);
3558     if (Iter != DupMap.end())
3559       return Iter->second;
3560   } else { // Conversion to Tiny core.
3561     for (const auto &Iter : DupMap)
3562       if (Iter.second == OpNum)
3563         return Iter.first;
3564   }
3565   return -1;
3566 }
3567 
3568 int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
3569   enum Hexagon::PredSense inPredSense;
3570   inPredSense = invertPredicate ? Hexagon::PredSense_false :
3571                                   Hexagon::PredSense_true;
3572   int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3573   if (CondOpcode >= 0) // Valid Conditional opcode/instruction
3574     return CondOpcode;
3575 
3576   llvm_unreachable("Unexpected predicable instruction");
3577 }
3578 
3579 // Return the .cur form of the given vector load.
3580 int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const {
3581   switch (MI.getOpcode()) {
3582   default: llvm_unreachable("Unknown .cur type");
3583   case Hexagon::V6_vL32b_pi:
3584     return Hexagon::V6_vL32b_cur_pi;
3585   case Hexagon::V6_vL32b_ai:
3586     return Hexagon::V6_vL32b_cur_ai;
3587   case Hexagon::V6_vL32b_nt_pi:
3588     return Hexagon::V6_vL32b_nt_cur_pi;
3589   case Hexagon::V6_vL32b_nt_ai:
3590     return Hexagon::V6_vL32b_nt_cur_ai;
3591   case Hexagon::V6_vL32b_ppu:
3592     return Hexagon::V6_vL32b_cur_ppu;
3593   case Hexagon::V6_vL32b_nt_ppu:
3594     return Hexagon::V6_vL32b_nt_cur_ppu;
3595   }
3596   return 0;
3597 }
3598 
3599 // Return the regular version of the .cur instruction.
3600 int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
3601   switch (MI.getOpcode()) {
3602   default: llvm_unreachable("Unknown .cur type");
3603   case Hexagon::V6_vL32b_cur_pi:
3604     return Hexagon::V6_vL32b_pi;
3605   case Hexagon::V6_vL32b_cur_ai:
3606     return Hexagon::V6_vL32b_ai;
3607   case Hexagon::V6_vL32b_nt_cur_pi:
3608     return Hexagon::V6_vL32b_nt_pi;
3609   case Hexagon::V6_vL32b_nt_cur_ai:
3610     return Hexagon::V6_vL32b_nt_ai;
3611   case Hexagon::V6_vL32b_cur_ppu:
3612     return Hexagon::V6_vL32b_ppu;
3613   case Hexagon::V6_vL32b_nt_cur_ppu:
3614     return Hexagon::V6_vL32b_nt_ppu;
3615   }
3616   return 0;
3617 }
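// The two mappings above are inverses of each other; for instance,
// V6_vL32b_ai becomes V6_vL32b_cur_ai when promoted to a .cur load and maps
// back to V6_vL32b_ai when the promotion has to be undone.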
3618 
3619 // The diagram below shows the steps involved in the conversion of a predicated
3620 // store instruction to its .new predicated new-value form.
3621 //
3622 // Note: It doesn't include conditional new-value stores as they can't be
3623 // converted to .new predicate.
3624 //
3625 //               p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
3626 //                ^           ^
3627 //               /             \ (not OK. it will cause new-value store to be
3628 //              /               X conditional on p0.new while R2 producer is
3629 //             /                 \ on p0)
3630 //            /                   \.
3631 //     p.new store                 p.old NV store
3632 // [if(p0.new)memw(R0+#0)=R2]    [if(p0)memw(R0+#0)=R2.new]
3633 //            ^                  ^
3634 //             \                /
3635 //              \              /
3636 //               \            /
3637 //                 p.old store
3638 //             [if (p0)memw(R0+#0)=R2]
3639 //
3640 // The following set of instructions further explains the scenario where
3641 // conditional new-value store becomes invalid when promoted to .new predicate
3642 // form.
3643 //
3644 // { 1) if (p0) r0 = add(r1, r2)
3645 //   2) p0 = cmp.eq(r3, #0) }
3646 //
3647 //   3) if (p0) memb(r1+#0) = r0  --> this instruction can't be grouped with
3648 // the first two instructions because in instr 1, r0 is conditional on the old
3649 // value of p0, but its use in instr 3 is conditional on p0 as modified by
3650 // instr 2, which is not valid for new-value stores.
3651 // Predicated new-value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3652 // from the "Conditional Store" list because a predicated new-value store
3653 // would NOT be promoted to a double dot-new store. See the diagram below.
3654 // This check returns true for those stores that are predicated but not
3655 // yet promoted to predicate dot-new instructions.
3656 //
3657 //                          +---------------------+
3658 //                    /-----| if (p0) memw(..)=r0 |---------\~
3659 //                   ||     +---------------------+         ||
3660 //          promote  ||       /\       /\                   ||  promote
3661 //                   ||      /||\     /||\                  ||
3662 //                  \||/    demote     ||                  \||/
3663 //                   \/       ||       ||                   \/
3664 //       +-------------------------+   ||   +-------------------------+
3665 //       | if (p0.new) memw(..)=r0 |   ||   | if (p0) memw(..)=r0.new |
3666 //       +-------------------------+   ||   +-------------------------+
3667 //                        ||           ||         ||
3668 //                        ||         demote      \||/
3669 //                      promote        ||         \/ NOT possible
3670 //                        ||           ||         /\~
3671 //                       \||/          ||        /||\~
3672 //                        \/           ||         ||
3673 //                      +-----------------------------+
3674 //                      | if (p0.new) memw(..)=r0.new |
3675 //                      +-----------------------------+
3676 //                           Double Dot New Store
3677 //
3678 // Returns the most basic instruction for the .new predicated instructions and
3679 // new-value stores.
3680 // For example, all of the following instructions will be converted back to the
3681 // same instruction:
3682 // 1) if (p0.new) memw(R0+#0) = R1.new  --->
3683 // 2) if (p0) memw(R0+#0)= R1.new      -------> if (p0) memw(R0+#0) = R1
3684 // 3) if (p0.new) memw(R0+#0) = R1      --->
3685 //
3686 // To understand the translation of instruction 1 to its original form, consider
3687 // a packet with 3 instructions.
3688 // { p0 = cmp.eq(R0,R1)
3689 //   if (p0.new) R2 = add(R3, R4)
3690 //   R5 = add (R3, R1)
3691 // }
3692 // if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3693 //
3694 // This instruction can be part of the previous packet only if both p0 and R2
3695 // are promoted to .new values. This promotion happens in steps: first the
3696 // predicate register is promoted to .new, and in the next iteration R2 is
3697 // promoted. Therefore, if the dependence check fails (due to R5) during the
3698 // next iteration, it should be converted back to its most basic form.
3699 
3700 // Return the new value instruction for a given store.
3701 int HexagonInstrInfo::getDotNewOp(const MachineInstr &MI) const {
3702   int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());
3703   if (NVOpcode >= 0) // Valid new-value store instruction.
3704     return NVOpcode;
3705 
3706   switch (MI.getOpcode()) {
3707   default:
3708     report_fatal_error(Twine("Unknown .new type: ") +
3709                        std::to_string(MI.getOpcode()));
3710   case Hexagon::S4_storerb_ur:
3711     return Hexagon::S4_storerbnew_ur;
3712 
3713   case Hexagon::S2_storerb_pci:
3714     return Hexagon::S2_storerb_pci;
3715 
3716   case Hexagon::S2_storeri_pci:
3717     return Hexagon::S2_storeri_pci;
3718 
3719   case Hexagon::S2_storerh_pci:
3720     return Hexagon::S2_storerh_pci;
3721 
3722   case Hexagon::S2_storerd_pci:
3723     return Hexagon::S2_storerd_pci;
3724 
3725   case Hexagon::S2_storerf_pci:
3726     return Hexagon::S2_storerf_pci;
3727 
3728   case Hexagon::V6_vS32b_ai:
3729     return Hexagon::V6_vS32b_new_ai;
3730 
3731   case Hexagon::V6_vS32b_pi:
3732     return Hexagon::V6_vS32b_new_pi;
3733   }
3734   return 0;
3735 }
3736 
3737 // Returns the opcode to use when converting MI, which is a conditional jump,
3738 // into a conditional instruction which uses the .new value of the predicate.
3739 // We also use branch probabilities to add a hint to the jump.
3740 // If MBPI is null, all edges will be treated as equally likely for the
3741 // purposes of establishing a predication hint.
3742 int HexagonInstrInfo::getDotNewPredJumpOp(const MachineInstr &MI,
3743       const MachineBranchProbabilityInfo *MBPI) const {
3744   // We assume that block can have at most two successors.
3745   // We assume that a block can have at most two successors.
3746   const MachineOperand &BrTarget = MI.getOperand(1);
3747   bool Taken = false;
3748   const BranchProbability OneHalf(1, 2);
3749 
3750   auto getEdgeProbability = [MBPI] (const MachineBasicBlock *Src,
3751                                     const MachineBasicBlock *Dst) {
3752     if (MBPI)
3753       return MBPI->getEdgeProbability(Src, Dst);
3754     return BranchProbability(1, Src->succ_size());
3755   };
3756 
3757   if (BrTarget.isMBB()) {
3758     const MachineBasicBlock *Dst = BrTarget.getMBB();
3759     Taken = getEdgeProbability(Src, Dst) >= OneHalf;
3760   } else {
3761     // The branch target is not a basic block (most likely a function).
3762     // Since BPI only gives probabilities for targets that are basic blocks,
3763     // try to identify another target of this branch (potentially a
3764     // fall-through) and check the probability of that target.
3765     //
3766     // The only handled branch combinations are:
3767     // - one conditional branch,
3768     // - one conditional branch followed by one unconditional branch.
3769     // Otherwise, assume not-taken.
3770     assert(MI.isConditionalBranch());
3771     const MachineBasicBlock &B = *MI.getParent();
3772     bool SawCond = false, Bad = false;
3773     for (const MachineInstr &I : B) {
3774       if (!I.isBranch())
3775         continue;
3776       if (I.isConditionalBranch()) {
3777         SawCond = true;
3778         if (&I != &MI) {
3779           Bad = true;
3780           break;
3781         }
3782       }
3783       if (I.isUnconditionalBranch() && !SawCond) {
3784         Bad = true;
3785         break;
3786       }
3787     }
3788     if (!Bad) {
3789       MachineBasicBlock::const_instr_iterator It(MI);
3790       MachineBasicBlock::const_instr_iterator NextIt = std::next(It);
3791       if (NextIt == B.instr_end()) {
3792         // If this branch is the last, look for the fall-through block.
3793         for (const MachineBasicBlock *SB : B.successors()) {
3794           if (!B.isLayoutSuccessor(SB))
3795             continue;
3796           Taken = getEdgeProbability(Src, SB) < OneHalf;
3797           break;
3798         }
3799       } else {
3800         assert(NextIt->isUnconditionalBranch());
3801         // Find the first MBB operand and assume it's the target.
3802         const MachineBasicBlock *BT = nullptr;
3803         for (const MachineOperand &Op : NextIt->operands()) {
3804           if (!Op.isMBB())
3805             continue;
3806           BT = Op.getMBB();
3807           break;
3808         }
3809         Taken = BT && getEdgeProbability(Src, BT) < OneHalf;
3810       }
3811     } // if (!Bad)
3812   }
3813 
3814   // The Taken flag should be set to something reasonable by this point.
3815 
3816   switch (MI.getOpcode()) {
3817   case Hexagon::J2_jumpt:
3818     return Taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3819   case Hexagon::J2_jumpf:
3820     return Taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3821 
3822   default:
3823     llvm_unreachable("Unexpected jump instruction.");
3824   }
3825 }
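// Concretely, with the Taken flag computed above: a J2_jumpt whose target
// edge is at least as likely as not becomes J2_jumptnewpt (dot-new,
// predicted taken), otherwise J2_jumptnew; J2_jumpf maps analogously to
// J2_jumpfnewpt or J2_jumpfnew.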
3826 
3827 // Return .new predicate version for an instruction.
3828 int HexagonInstrInfo::getDotNewPredOp(const MachineInstr &MI,
3829       const MachineBranchProbabilityInfo *MBPI) const {
3830   switch (MI.getOpcode()) {
3831   // Conditional Jumps
3832   case Hexagon::J2_jumpt:
3833   case Hexagon::J2_jumpf:
3834     return getDotNewPredJumpOp(MI, MBPI);
3835   }
3836 
3837   int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
3838   if (NewOpcode >= 0)
3839     return NewOpcode;
3840   return 0;
3841 }
3842 
3843 int HexagonInstrInfo::getDotOldOp(const MachineInstr &MI) const {
3844   int NewOp = MI.getOpcode();
3845   if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3846     NewOp = Hexagon::getPredOldOpcode(NewOp);
3847     // All Hexagon architectures have prediction bits on dot-new branches,
3848     // but only Hexagon V60+ has prediction bits on dot-old ones. Make sure
3849     // to pick the right opcode when converting back to dot-old.
3850     if (!Subtarget.hasFeature(Hexagon::ArchV60)) {
3851       switch (NewOp) {
3852       case Hexagon::J2_jumptpt:
3853         NewOp = Hexagon::J2_jumpt;
3854         break;
3855       case Hexagon::J2_jumpfpt:
3856         NewOp = Hexagon::J2_jumpf;
3857         break;
3858       case Hexagon::J2_jumprtpt:
3859         NewOp = Hexagon::J2_jumprt;
3860         break;
3861       case Hexagon::J2_jumprfpt:
3862         NewOp = Hexagon::J2_jumprf;
3863         break;
3864       }
3865     }
3866     assert(NewOp >= 0 &&
3867            "Couldn't change predicate new instruction to its old form.");
3868   }
3869 
3870   if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3871     NewOp = Hexagon::getNonNVStore(NewOp);
3872     assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3873   }
3874 
3875   if (Subtarget.hasV60Ops())
3876     return NewOp;
3877 
3878   // Subtargets prior to V60 didn't support 'taken' forms of predicated jumps.
3879   switch (NewOp) {
3880   case Hexagon::J2_jumpfpt:
3881     return Hexagon::J2_jumpf;
3882   case Hexagon::J2_jumptpt:
3883     return Hexagon::J2_jumpt;
3884   case Hexagon::J2_jumprfpt:
3885     return Hexagon::J2_jumprf;
3886   case Hexagon::J2_jumprtpt:
3887     return Hexagon::J2_jumprt;
3888   }
3889   return NewOp;
3890 }
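// For example, on a subtarget without ArchV60 a branch such as
// J2_jumptnewpt is first mapped to its dot-old form via getPredOldOpcode
// and then stripped of its taken hint, ending up as plain J2_jumpt.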
3891 
3892 // See if instruction could potentially be a duplex candidate.
3893 // If so, return its group. Zero otherwise.
3894 HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
3895       const MachineInstr &MI) const {
3896   Register DstReg, SrcReg, Src1Reg, Src2Reg;
3897   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
3898 
3899   switch (MI.getOpcode()) {
3900   default:
3901     return HexagonII::HSIG_None;
3902   //
3903   // Group L1:
3904   //
3905   // Rd = memw(Rs+#u4:2)
3906   // Rd = memub(Rs+#u4:0)
3907   case Hexagon::L2_loadri_io:
3908   case Hexagon::dup_L2_loadri_io:
3909     DstReg = MI.getOperand(0).getReg();
3910     SrcReg = MI.getOperand(1).getReg();
3911     // Special case this one from Group L2.
3912     // Rd = memw(r29+#u5:2)
3913     if (isIntRegForSubInst(DstReg)) {
3914       if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3915           HRI.getStackRegister() == SrcReg &&
3916           MI.getOperand(2).isImm() &&
3917           isShiftedUInt<5,2>(MI.getOperand(2).getImm()))
3918         return HexagonII::HSIG_L2;
3919       // Rd = memw(Rs+#u4:2)
3920       if (isIntRegForSubInst(SrcReg) &&
3921           (MI.getOperand(2).isImm() &&
3922           isShiftedUInt<4,2>(MI.getOperand(2).getImm())))
3923         return HexagonII::HSIG_L1;
3924     }
3925     break;
3926   case Hexagon::L2_loadrub_io:
3927   case Hexagon::dup_L2_loadrub_io:
3928     // Rd = memub(Rs+#u4:0)
3929     DstReg = MI.getOperand(0).getReg();
3930     SrcReg = MI.getOperand(1).getReg();
3931     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3932         MI.getOperand(2).isImm() && isUInt<4>(MI.getOperand(2).getImm()))
3933       return HexagonII::HSIG_L1;
3934     break;
3935   //
3936   // Group L2:
3937   //
3938   // Rd = memh/memuh(Rs+#u3:1)
3939   // Rd = memb(Rs+#u3:0)
3940   // Rd = memw(r29+#u5:2) - Handled above.
3941   // Rdd = memd(r29+#u5:3)
3942   // deallocframe
3943   // [if ([!]p0[.new])] dealloc_return
3944   // [if ([!]p0[.new])] jumpr r31
3945   case Hexagon::L2_loadrh_io:
3946   case Hexagon::L2_loadruh_io:
3947   case Hexagon::dup_L2_loadrh_io:
3948   case Hexagon::dup_L2_loadruh_io:
3949     // Rd = memh/memuh(Rs+#u3:1)
3950     DstReg = MI.getOperand(0).getReg();
3951     SrcReg = MI.getOperand(1).getReg();
3952     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3953         MI.getOperand(2).isImm() &&
3954         isShiftedUInt<3,1>(MI.getOperand(2).getImm()))
3955       return HexagonII::HSIG_L2;
3956     break;
3957   case Hexagon::L2_loadrb_io:
3958   case Hexagon::dup_L2_loadrb_io:
3959     // Rd = memb(Rs+#u3:0)
3960     DstReg = MI.getOperand(0).getReg();
3961     SrcReg = MI.getOperand(1).getReg();
3962     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3963         MI.getOperand(2).isImm() &&
3964         isUInt<3>(MI.getOperand(2).getImm()))
3965       return HexagonII::HSIG_L2;
3966     break;
3967   case Hexagon::L2_loadrd_io:
3968   case Hexagon::dup_L2_loadrd_io:
3969     // Rdd = memd(r29+#u5:3)
3970     DstReg = MI.getOperand(0).getReg();
3971     SrcReg = MI.getOperand(1).getReg();
3972     if (isDblRegForSubInst(DstReg, HRI) &&
3973         Hexagon::IntRegsRegClass.contains(SrcReg) &&
3974         HRI.getStackRegister() == SrcReg &&
3975         MI.getOperand(2).isImm() &&
3976         isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
3977       return HexagonII::HSIG_L2;
3978     break;
3979   // dealloc_return is not documented in the Hexagon Manual, but it is marked
3980   // with the A_SUBINSN attribute in iset_v4classic.py.
3981   case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3982   case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3983   case Hexagon::L4_return:
3984   case Hexagon::L2_deallocframe:
3985   case Hexagon::dup_L2_deallocframe:
3986     return HexagonII::HSIG_L2;
3987   case Hexagon::EH_RETURN_JMPR:
3988   case Hexagon::PS_jmpret:
3989   case Hexagon::SL2_jumpr31:
3990     // jumpr r31
3991     // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0
3992     DstReg = MI.getOperand(0).getReg();
3993     if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
3994       return HexagonII::HSIG_L2;
3995     break;
3996   case Hexagon::PS_jmprett:
3997   case Hexagon::PS_jmpretf:
3998   case Hexagon::PS_jmprettnewpt:
3999   case Hexagon::PS_jmpretfnewpt:
4000   case Hexagon::PS_jmprettnew:
4001   case Hexagon::PS_jmpretfnew:
4002   case Hexagon::SL2_jumpr31_t:
4003   case Hexagon::SL2_jumpr31_f:
4004   case Hexagon::SL2_jumpr31_tnew:
4005   case Hexagon::SL2_jumpr31_fnew:
4006     DstReg = MI.getOperand(1).getReg();
4007     SrcReg = MI.getOperand(0).getReg();
4008     // [if ([!]p0[.new])] jumpr r31
4009     if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
4010         (Hexagon::P0 == SrcReg)) &&
4011         (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
4012       return HexagonII::HSIG_L2;
4013     break;
4014   case Hexagon::L4_return_t:
4015   case Hexagon::L4_return_f:
4016   case Hexagon::L4_return_tnew_pnt:
4017   case Hexagon::L4_return_fnew_pnt:
4018   case Hexagon::L4_return_tnew_pt:
4019   case Hexagon::L4_return_fnew_pt:
4020     // [if ([!]p0[.new])] dealloc_return
4021     SrcReg = MI.getOperand(0).getReg();
4022     if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
4023       return HexagonII::HSIG_L2;
4024     break;
4025   //
4026   // Group S1:
4027   //
4028   // memw(Rs+#u4:2) = Rt
4029   // memb(Rs+#u4:0) = Rt
4030   case Hexagon::S2_storeri_io:
4031   case Hexagon::dup_S2_storeri_io:
4032     // Special case this one from Group S2.
4033     // memw(r29+#u5:2) = Rt
4034     Src1Reg = MI.getOperand(0).getReg();
4035     Src2Reg = MI.getOperand(2).getReg();
4036     if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4037         isIntRegForSubInst(Src2Reg) &&
4038         HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
4039         isShiftedUInt<5,2>(MI.getOperand(1).getImm()))
4040       return HexagonII::HSIG_S2;
4041     // memw(Rs+#u4:2) = Rt
4042     if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4043         MI.getOperand(1).isImm() &&
4044         isShiftedUInt<4,2>(MI.getOperand(1).getImm()))
4045       return HexagonII::HSIG_S1;
4046     break;
4047   case Hexagon::S2_storerb_io:
4048   case Hexagon::dup_S2_storerb_io:
4049     // memb(Rs+#u4:0) = Rt
4050     Src1Reg = MI.getOperand(0).getReg();
4051     Src2Reg = MI.getOperand(2).getReg();
4052     if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4053         MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()))
4054       return HexagonII::HSIG_S1;
4055     break;
4056   //
4057   // Group S2:
4058   //
4059   // memh(Rs+#u3:1) = Rt
4060   // memw(r29+#u5:2) = Rt
4061   // memd(r29+#s6:3) = Rtt
4062   // memw(Rs+#u4:2) = #U1
4063   // memb(Rs+#u4) = #U1
4064   // allocframe(#u5:3)
4065   case Hexagon::S2_storerh_io:
4066   case Hexagon::dup_S2_storerh_io:
4067     // memh(Rs+#u3:1) = Rt
4068     Src1Reg = MI.getOperand(0).getReg();
4069     Src2Reg = MI.getOperand(2).getReg();
4070     if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4071         MI.getOperand(1).isImm() &&
4072         isShiftedUInt<3,1>(MI.getOperand(1).getImm()))
4073       return HexagonII::HSIG_S1;
4074     break;
4075   case Hexagon::S2_storerd_io:
4076   case Hexagon::dup_S2_storerd_io:
4077     // memd(r29+#s6:3) = Rtt
4078     Src1Reg = MI.getOperand(0).getReg();
4079     Src2Reg = MI.getOperand(2).getReg();
4080     if (isDblRegForSubInst(Src2Reg, HRI) &&
4081         Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4082         HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
4083         isShiftedInt<6,3>(MI.getOperand(1).getImm()))
4084       return HexagonII::HSIG_S2;
4085     break;
4086   case Hexagon::S4_storeiri_io:
4087   case Hexagon::dup_S4_storeiri_io:
4088     // memw(Rs+#u4:2) = #U1
4089     Src1Reg = MI.getOperand(0).getReg();
4090     if (isIntRegForSubInst(Src1Reg) && MI.getOperand(1).isImm() &&
4091         isShiftedUInt<4,2>(MI.getOperand(1).getImm()) &&
4092         MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4093       return HexagonII::HSIG_S2;
4094     break;
4095   case Hexagon::S4_storeirb_io:
4096   case Hexagon::dup_S4_storeirb_io:
4097     // memb(Rs+#u4) = #U1
4098     Src1Reg = MI.getOperand(0).getReg();
4099     if (isIntRegForSubInst(Src1Reg) &&
4100         MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()) &&
4101         MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4102       return HexagonII::HSIG_S2;
4103     break;
4104   case Hexagon::S2_allocframe:
4105   case Hexagon::dup_S2_allocframe:
4106     if (MI.getOperand(2).isImm() &&
4107         isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
4108       return HexagonII::HSIG_S1;
4109     break;
4110   //
4111   // Group A:
4112   //
4113   // Rx = add(Rx,#s7)
4114   // Rd = Rs
4115   // Rd = #u6
4116   // Rd = #-1
4117   // if ([!]P0[.new]) Rd = #0
4118   // Rd = add(r29,#u6:2)
4119   // Rx = add(Rx,Rs)
4120   // P0 = cmp.eq(Rs,#u2)
4121   // Rdd = combine(#0,Rs)
4122   // Rdd = combine(Rs,#0)
4123   // Rdd = combine(#u2,#U2)
4124   // Rd = add(Rs,#1)
4125   // Rd = add(Rs,#-1)
4126   // Rd = sxth/sxtb/zxtb/zxth(Rs)
4127   // Rd = and(Rs,#1)
4128   case Hexagon::A2_addi:
4129   case Hexagon::dup_A2_addi:
4130     DstReg = MI.getOperand(0).getReg();
4131     SrcReg = MI.getOperand(1).getReg();
4132     if (isIntRegForSubInst(DstReg)) {
4133       // Rd = add(r29,#u6:2)
4134       if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
4135         HRI.getStackRegister() == SrcReg && MI.getOperand(2).isImm() &&
4136         isShiftedUInt<6,2>(MI.getOperand(2).getImm()))
4137         return HexagonII::HSIG_A;
4138       // Rx = add(Rx,#s7)
4139       if ((DstReg == SrcReg) && MI.getOperand(2).isImm() &&
4140           isInt<7>(MI.getOperand(2).getImm()))
4141         return HexagonII::HSIG_A;
4142       // Rd = add(Rs,#1)
4143       // Rd = add(Rs,#-1)
4144       if (isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
4145           ((MI.getOperand(2).getImm() == 1) ||
4146           (MI.getOperand(2).getImm() == -1)))
4147         return HexagonII::HSIG_A;
4148     }
4149     break;
4150   case Hexagon::A2_add:
4151   case Hexagon::dup_A2_add:
4152     // Rx = add(Rx,Rs)
4153     DstReg = MI.getOperand(0).getReg();
4154     Src1Reg = MI.getOperand(1).getReg();
4155     Src2Reg = MI.getOperand(2).getReg();
4156     if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
4157         isIntRegForSubInst(Src2Reg))
4158       return HexagonII::HSIG_A;
4159     break;
4160   case Hexagon::A2_andir:
4161   case Hexagon::dup_A2_andir:
4162     // Same as zxtb.
4163     // Rd16=and(Rs16,#255)
4164     // Rd16=and(Rs16,#1)
4165     DstReg = MI.getOperand(0).getReg();
4166     SrcReg = MI.getOperand(1).getReg();
4167     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
4168         MI.getOperand(2).isImm() &&
4169         ((MI.getOperand(2).getImm() == 1) ||
4170         (MI.getOperand(2).getImm() == 255)))
4171       return HexagonII::HSIG_A;
4172     break;
4173   case Hexagon::A2_tfr:
4174   case Hexagon::dup_A2_tfr:
4175     // Rd = Rs
4176     DstReg = MI.getOperand(0).getReg();
4177     SrcReg = MI.getOperand(1).getReg();
4178     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4179       return HexagonII::HSIG_A;
4180     break;
4181   case Hexagon::A2_tfrsi:
4182   case Hexagon::dup_A2_tfrsi:
4183     // Rd = #u6
4184     // Do not test for the #u6 size since the constant is getting extended
4185     // regardless and a compound could still be formed.
4186     // Rd = #-1
4187     DstReg = MI.getOperand(0).getReg();
4188     if (isIntRegForSubInst(DstReg))
4189       return HexagonII::HSIG_A;
4190     break;
4191   case Hexagon::C2_cmoveit:
4192   case Hexagon::C2_cmovenewit:
4193   case Hexagon::C2_cmoveif:
4194   case Hexagon::C2_cmovenewif:
4195   case Hexagon::dup_C2_cmoveit:
4196   case Hexagon::dup_C2_cmovenewit:
4197   case Hexagon::dup_C2_cmoveif:
4198   case Hexagon::dup_C2_cmovenewif:
4199     // if ([!]P0[.new]) Rd = #0
4200     // Actual form:
4201     // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
4202     DstReg = MI.getOperand(0).getReg();
4203     SrcReg = MI.getOperand(1).getReg();
4204     if (isIntRegForSubInst(DstReg) &&
4205         Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
4206         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0)
4207       return HexagonII::HSIG_A;
4208     break;
4209   case Hexagon::C2_cmpeqi:
4210   case Hexagon::dup_C2_cmpeqi:
4211     // P0 = cmp.eq(Rs,#u2)
4212     DstReg = MI.getOperand(0).getReg();
4213     SrcReg = MI.getOperand(1).getReg();
4214     if (Hexagon::PredRegsRegClass.contains(DstReg) &&
4215         Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
4216         MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm()))
4217       return HexagonII::HSIG_A;
4218     break;
4219   case Hexagon::A2_combineii:
4220   case Hexagon::A4_combineii:
4221   case Hexagon::dup_A2_combineii:
4222   case Hexagon::dup_A4_combineii:
4223     // Rdd = combine(#u2,#U2)
4224     DstReg = MI.getOperand(0).getReg();
4225     if (isDblRegForSubInst(DstReg, HRI) &&
4226         ((MI.getOperand(1).isImm() && isUInt<2>(MI.getOperand(1).getImm())) ||
4227         (MI.getOperand(1).isGlobal() &&
4228         isUInt<2>(MI.getOperand(1).getOffset()))) &&
4229         ((MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm())) ||
4230         (MI.getOperand(2).isGlobal() &&
4231         isUInt<2>(MI.getOperand(2).getOffset()))))
4232       return HexagonII::HSIG_A;
4233     break;
4234   case Hexagon::A4_combineri:
4235   case Hexagon::dup_A4_combineri:
4236     // Rdd = combine(Rs,#0)
4238     DstReg = MI.getOperand(0).getReg();
4239     SrcReg = MI.getOperand(1).getReg();
4240     if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4241         ((MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) ||
4242         (MI.getOperand(2).isGlobal() && MI.getOperand(2).getOffset() == 0)))
4243       return HexagonII::HSIG_A;
4244     break;
4245   case Hexagon::A4_combineir:
4246   case Hexagon::dup_A4_combineir:
4247     // Rdd = combine(#0,Rs)
4248     DstReg = MI.getOperand(0).getReg();
4249     SrcReg = MI.getOperand(2).getReg();
4250     if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4251         ((MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
4252         (MI.getOperand(1).isGlobal() && MI.getOperand(1).getOffset() == 0)))
4253       return HexagonII::HSIG_A;
4254     break;
4255   case Hexagon::A2_sxtb:
4256   case Hexagon::A2_sxth:
4257   case Hexagon::A2_zxtb:
4258   case Hexagon::A2_zxth:
4259   case Hexagon::dup_A2_sxtb:
4260   case Hexagon::dup_A2_sxth:
4261   case Hexagon::dup_A2_zxtb:
4262   case Hexagon::dup_A2_zxth:
4263     // Rd = sxth/sxtb/zxtb/zxth(Rs)
4264     DstReg = MI.getOperand(0).getReg();
4265     SrcReg = MI.getOperand(1).getReg();
4266     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4267       return HexagonII::HSIG_A;
4268     break;
4269   }
4270 
4271   return HexagonII::HSIG_None;
4272 }
4273 
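// Return the real-HW opcode that corresponds to MI's opcode, as provided by
// the getRealHWInstr instruction mapping.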
4274 short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr &MI) const {
4275   return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);
4276 }
4277 
4278 unsigned HexagonInstrInfo::getInstrTimingClassLatency(
4279       const InstrItineraryData *ItinData, const MachineInstr &MI) const {
4280   // Default to one cycle for no itinerary. However, an "empty" itinerary may
4281   // still have a MinLatency property, which getStageLatency checks.
4282   if (!ItinData)
4283     return getInstrLatency(ItinData, MI);
4284 
4285   if (MI.isTransient())
4286     return 0;
4287   return ItinData->getStageLatency(MI.getDesc().getSchedClass());
4288 }
4289 
4290 /// getOperandLatency - Compute and return the use operand latency of a given
4291 /// pair of def and use.
4292 /// In most cases, the static scheduling itinerary is enough to determine the
4293 /// operand latency, but that may not be possible for instructions with a
4294 /// variable number of defs / uses.
4295 ///
4296 /// This is a raw interface to the itinerary that may be directly overridden by
4297 /// a target. Use computeOperandLatency to get the best estimate of latency.
4298 int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4299                                         const MachineInstr &DefMI,
4300                                         unsigned DefIdx,
4301                                         const MachineInstr &UseMI,
4302                                         unsigned UseIdx) const {
4303   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4304 
4305   // Get DefIdx and UseIdx for super registers.
4306   const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4307 
4308   if (DefMO.isReg() && DefMO.getReg().isPhysical()) {
4309     if (DefMO.isImplicit()) {
4310       for (MCPhysReg SR : HRI.superregs(DefMO.getReg())) {
4311         int Idx = DefMI.findRegisterDefOperandIdx(SR, false, false, &HRI);
4312         if (Idx != -1) {
4313           DefIdx = Idx;
4314           break;
4315         }
4316       }
4317     }
4318 
4319     const MachineOperand &UseMO = UseMI.getOperand(UseIdx);
4320     if (UseMO.isImplicit()) {
4321       for (MCPhysReg SR : HRI.superregs(UseMO.getReg())) {
4322         int Idx = UseMI.findRegisterUseOperandIdx(SR, false, &HRI);
4323         if (Idx != -1) {
4324           UseIdx = Idx;
4325           break;
4326         }
4327       }
4328     }
4329   }
4330 
4331   int Latency = TargetInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
4332                                                    UseMI, UseIdx);
4333   if (!Latency)
4334     // We should never have 0 cycle latency between two instructions unless
4335     // they can be packetized together. However, this decision can't be made
4336     // here.
4337     Latency = 1;
4338   return Latency;
4339 }
4340 
4341 // Inverts the predication logic:
4342 //   P -> NotP
4343 //   NotP -> P
4344 bool HexagonInstrInfo::getInvertedPredSense(
4345       SmallVectorImpl<MachineOperand> &Cond) const {
4346   if (Cond.empty())
4347     return false;
4348   unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
4349   Cond[0].setImm(Opc);
4350   return true;
4351 }
4352 
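// Return the opcode with the opposite predicate sense (true <-> false).
// Fails with llvm_unreachable if no inverted form exists.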
4353 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
4354   int InvPredOpcode;
4355   InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
4356                                         : Hexagon::getTruePredOpcode(Opc);
4357   if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
4358     return InvPredOpcode;
4359 
4360   llvm_unreachable("Unexpected predicated instruction");
4361 }
4362 
4363 // Returns the max value that doesn't need to be extended.
4364 int HexagonInstrInfo::getMaxValue(const MachineInstr &MI) const {
4365   const uint64_t F = MI.getDesc().TSFlags;
4366   unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4367                     & HexagonII::ExtentSignedMask;
4368   unsigned bits =  (F >> HexagonII::ExtentBitsPos)
4369                     & HexagonII::ExtentBitsMask;
4370 
4371   if (isSigned) // if value is signed
4372     return ~(-1U << (bits - 1));
4373   else
4374     return ~(-1U << bits);
4375 }
4376 
4377 
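// Return true if MI is one of the listed GP-relative loads/stores or uses a
// base+offset addressing mode (register, immediate, or long offset).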
4378 bool HexagonInstrInfo::isAddrModeWithOffset(const MachineInstr &MI) const {
4379   switch (MI.getOpcode()) {
4380   case Hexagon::L2_loadrbgp:
4381   case Hexagon::L2_loadrdgp:
4382   case Hexagon::L2_loadrhgp:
4383   case Hexagon::L2_loadrigp:
4384   case Hexagon::L2_loadrubgp:
4385   case Hexagon::L2_loadruhgp:
4386   case Hexagon::S2_storerbgp:
4387   case Hexagon::S2_storerbnewgp:
4388   case Hexagon::S2_storerhgp:
4389   case Hexagon::S2_storerhnewgp:
4390   case Hexagon::S2_storerigp:
4391   case Hexagon::S2_storerinewgp:
4392   case Hexagon::S2_storerdgp:
4393   case Hexagon::S2_storerfgp:
4394     return true;
4395   }
4396   const uint64_t F = MI.getDesc().TSFlags;
4397   unsigned addrMode =
4398     ((F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask);
4399   // Disallow any base+offset instruction. The assembler does not yet reorder
4400   // based on any zero-offset instruction.
4401   return (addrMode == HexagonII::BaseRegOffset ||
4402           addrMode == HexagonII::BaseImmOffset ||
4403           addrMode == HexagonII::BaseLongOffset);
4404 }
4405 
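// Return true if MI can only be issued in slot 0, based on its functional
// units.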
4406 bool HexagonInstrInfo::isPureSlot0(const MachineInstr &MI) const {
4407   // Workaround for the Global Scheduler. Sometimes, it creates
4408   // A4_ext as a Pseudo instruction and calls this function to see if
4409   // it can be added to an existing bundle. Since the instruction doesn't
4410   // belong to any BB yet, we can't use getUnits API.
4411   if (MI.getOpcode() == Hexagon::A4_ext)
4412     return false;
4413 
4414   unsigned FuncUnits = getUnits(MI);
4415   return HexagonFUnits::isSlot0Only(FuncUnits);
4416 }
4417 
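// Test the RestrictNoSlot1Store bit in MI's TSFlags.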
4418 bool HexagonInstrInfo::isRestrictNoSlot1Store(const MachineInstr &MI) const {
4419   const uint64_t F = MI.getDesc().TSFlags;
4420   return ((F >> HexagonII::RestrictNoSlot1StorePos) &
4421           HexagonII::RestrictNoSlot1StoreMask);
4422 }
4423 
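// Rewrite MII's opcode through the duplex opcode mapping (getDuplexOpcode).
// When converting to BigCore form, only duplex candidates are considered; the
// opcode is left unchanged when no mapping exists.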
4424 void HexagonInstrInfo::changeDuplexOpcode(MachineBasicBlock::instr_iterator MII,
4425                                           bool ToBigInstrs) const {
4426   int Opcode = -1;
4427   if (ToBigInstrs) { // To BigCore Instr.
4428     // Check if the instruction can form a Duplex.
4429     if (getDuplexCandidateGroup(*MII))
4430       // Get the opcode carrying the "dup_*" tag.
4431       Opcode = getDuplexOpcode(*MII, ToBigInstrs);
4432   } else // To TinyCore Instr.
4433     Opcode = getDuplexOpcode(*MII, ToBigInstrs);
4434 
4435   // Change the opcode of the instruction.
4436   if (Opcode >= 0)
4437     MII->setDesc(get(Opcode));
4438 }
4439 
4440 // This function translates instructions to facilitate generating duplexes
4441 // on TinyCore.
4442 void HexagonInstrInfo::translateInstrsForDup(MachineFunction &MF,
4443                                              bool ToBigInstrs) const {
4444   for (auto &MB : MF)
4445     for (MachineBasicBlock::instr_iterator Instr = MB.instr_begin(),
4446                                            End = MB.instr_end();
4447          Instr != End; ++Instr)
4448       changeDuplexOpcode(Instr, ToBigInstrs);
4449 }
4450 
4451 // This is a specialized form of the function above.
4452 void HexagonInstrInfo::translateInstrsForDup(
4453     MachineBasicBlock::instr_iterator MII, bool ToBigInstrs) const {
4454   MachineBasicBlock *MBB = MII->getParent();
4455   while ((MII != MBB->instr_end()) && MII->isInsideBundle()) {
4456     changeDuplexOpcode(MII, ToBigInstrs);
4457     ++MII;
4458   }
4459 }
4460 
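// Return the size in bytes of MI's memory access, derived from its TSFlags.
// Y2_dcfetchbo and HVX vector accesses are handled as special cases.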
4461 unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr &MI) const {
4462   using namespace HexagonII;
4463 
4464   const uint64_t F = MI.getDesc().TSFlags;
4465   unsigned S = (F >> MemAccessSizePos) & MemAccesSizeMask;
4466   unsigned Size = getMemAccessSizeInBytes(MemAccessSize(S));
4467   if (Size != 0)
4468     return Size;
4469   // Y2_dcfetchbo is special
4470   if (MI.getOpcode() == Hexagon::Y2_dcfetchbo)
4471     return HexagonII::DoubleWordAccess;
4472 
4473   // Handle vector access sizes.
4474   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4475   switch (S) {
4476     case HexagonII::HVXVectorAccess:
4477       return HRI.getSpillSize(Hexagon::HvxVRRegClass);
4478     default:
4479       llvm_unreachable("Unexpected instruction");
4480   }
4481 }
4482 
4483 // Returns the min value that doesn't need to be extended.
4484 int HexagonInstrInfo::getMinValue(const MachineInstr &MI) const {
4485   const uint64_t F = MI.getDesc().TSFlags;
4486   unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4487                     & HexagonII::ExtentSignedMask;
4488   unsigned bits =  (F >> HexagonII::ExtentBitsPos)
4489                     & HexagonII::ExtentBitsMask;
4490 
4491   if (isSigned) // if value is signed
4492     return -1U << (bits - 1);
4493   else
4494     return 0;
4495 }
4496 
4497 // Returns opcode of the non-extended equivalent instruction.
4498 short HexagonInstrInfo::getNonExtOpcode(const MachineInstr &MI) const {
4499   // Check if the instruction has a register form that uses a register in place
4500   // of the extended operand; if so, return that as the non-extended form.
4501   short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
4502   if (NonExtOpcode >= 0)
4503     return NonExtOpcode;
4504 
4505   if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
4506     // Check addressing mode and retrieve non-ext equivalent instruction.
4507     switch (getAddrMode(MI)) {
4508     case HexagonII::Absolute:
4509       return Hexagon::changeAddrMode_abs_io(MI.getOpcode());
4510     case HexagonII::BaseImmOffset:
4511       return Hexagon::changeAddrMode_io_rr(MI.getOpcode());
4512     case HexagonII::BaseLongOffset:
4513       return Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
4514 
4515     default:
4516       return -1;
4517     }
4518   }
4519   return -1;
4520 }
4521 
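// Extract the predicate register, its position in Cond, and its register
// state flags from a branch condition. Returns false for empty conditions,
// new-value jumps, and endloops.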
4522 bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
4523       Register &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
4524   if (Cond.empty())
4525     return false;
4526   assert(Cond.size() == 2);
4527   if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
4528     LLVM_DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
4529     return false;
4530   }
4531   PredReg = Cond[1].getReg();
4532   PredRegPos = 1;
4533   // See IfConversion.cpp for why we add RegState::Implicit | RegState::Undef.
4534   PredRegFlags = 0;
4535   if (Cond[1].isImplicit())
4536     PredRegFlags = RegState::Implicit;
4537   if (Cond[1].isUndef())
4538     PredRegFlags |= RegState::Undef;
4539   return true;
4540 }
4541 
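// Return the pseudo-instruction opcode paired with MI's opcode in the
// getRealHWInstr mapping.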
4542 short HexagonInstrInfo::getPseudoInstrPair(const MachineInstr &MI) const {
4543   return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);
4544 }
4545 
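// Return the register-form opcode corresponding to MI's opcode, per the
// getRegForm instruction mapping.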
4546 short HexagonInstrInfo::getRegForm(const MachineInstr &MI) const {
4547   return Hexagon::getRegForm(MI.getOpcode());
4548 }
4549 
4550 // Return the number of bytes required to encode the instruction.
4551 // Hexagon instructions are fixed length, 4 bytes, unless they
4552 // use a constant extender, which requires another 4 bytes.
4553 // For debug instructions and prolog labels, return 0.
4554 unsigned HexagonInstrInfo::getSize(const MachineInstr &MI) const {
4555   if (MI.isDebugInstr() || MI.isPosition())
4556     return 0;
4557 
4558   unsigned Size = MI.getDesc().getSize();
4559   if (!Size)
4560     // Assume the default insn size in case it cannot be determined
4561     // for whatever reason.
4562     Size = HEXAGON_INSTR_SIZE;
4563 
4564   if (isConstExtended(MI) || isExtended(MI))
4565     Size += HEXAGON_INSTR_SIZE;
4566 
4567   // Try to compute the number of instructions in the inline-asm string.
4568   if (BranchRelaxAsmLarge && MI.getOpcode() == Hexagon::INLINEASM) {
4569     const MachineBasicBlock &MBB = *MI.getParent();
4570     const MachineFunction *MF = MBB.getParent();
4571     const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
4572 
4573     // Count the number of register definitions to find the asm string.
4574     unsigned NumDefs = 0;
4575     for (; MI.getOperand(NumDefs).isReg() && MI.getOperand(NumDefs).isDef();
4576          ++NumDefs)
4577       assert(NumDefs != MI.getNumOperands()-2 && "No asm string?");
4578 
4579     assert(MI.getOperand(NumDefs).isSymbol() && "No asm string?");
4580     // Disassemble the AsmStr and approximate number of instructions.
4581     const char *AsmStr = MI.getOperand(NumDefs).getSymbolName();
4582     Size = getInlineAsmLength(AsmStr, *MAI);
4583   }
4584 
4585   return Size;
4586 }
4587 
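// Return the Hexagon instruction type field encoded in MI's TSFlags.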
4588 uint64_t HexagonInstrInfo::getType(const MachineInstr &MI) const {
4589   const uint64_t F = MI.getDesc().TSFlags;
4590   return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
4591 }
4592 
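// Return the functional units usable by MI, taken from the first stage of
// its scheduling itinerary.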
4593 InstrStage::FuncUnits HexagonInstrInfo::getUnits(const MachineInstr &MI) const {
4594   const InstrItineraryData &II = *Subtarget.getInstrItineraryData();
4595   const InstrStage &IS = *II.beginStage(MI.getDesc().getSchedClass());
4596 
4597   return IS.getUnits();
4598 }
4599 
4600 // Calculate size of the basic block without debug instructions.
4601 unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const {
4602   return nonDbgMICount(BB->instr_begin(), BB->instr_end());
4603 }
4604 
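// Count the non-debug instructions in a bundle, excluding the bundle header.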
4605 unsigned HexagonInstrInfo::nonDbgBundleSize(
4606       MachineBasicBlock::const_iterator BundleHead) const {
4607   assert(BundleHead->isBundle() && "Not a bundle header");
4608   auto MII = BundleHead.getInstrIterator();
4609   // Skip the bundle header.
4610   return nonDbgMICount(++MII, getBundleEnd(BundleHead.getInstrIterator()));
4611 }
4612 
4613 /// immediateExtend - Changes the instruction in place to one using an immediate
4614 /// extender.
4615 void HexagonInstrInfo::immediateExtend(MachineInstr &MI) const {
4616   assert((isExtendable(MI)||isConstExtended(MI)) &&
4617                                "Instruction must be extendable");
4618   // Find which operand is extendable.
4619   short ExtOpNum = getCExtOpNum(MI);
4620   MachineOperand &MO = MI.getOperand(ExtOpNum);
4621   // This needs to be something we understand.
4622   assert((MO.isMBB() || MO.isImm()) &&
4623          "Branch with unknown extendable field type");
4624   // Mark given operand as extended.
4625   MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
4626 }
4627 
4628 bool HexagonInstrInfo::invertAndChangeJumpTarget(
4629       MachineInstr &MI, MachineBasicBlock *NewTarget) const {
4630   LLVM_DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to "
4631                     << printMBBReference(*NewTarget);
4632              MI.dump(););
4633   assert(MI.isBranch());
4634   unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
4635   int TargetPos = MI.getNumOperands() - 1;
4636   // In general the branch target is the last operand, but implicit defs
4637   // added at the end might change its position.
4638   while ((TargetPos > -1) && !MI.getOperand(TargetPos).isMBB())
4639     --TargetPos;
4640   assert((TargetPos >= 0) && MI.getOperand(TargetPos).isMBB());
4641   MI.getOperand(TargetPos).setMBB(NewTarget);
4642   if (EnableBranchPrediction && isPredicatedNew(MI)) {
4643     NewOpcode = reversePrediction(NewOpcode);
4644   }
4645   MI.setDesc(get(NewOpcode));
4646   return true;
4647 }
4648 
4649 void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
4650   /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
4651   MachineFunction::iterator A = MF.begin();
4652   MachineBasicBlock &B = *A;
4653   MachineBasicBlock::iterator I = B.begin();
4654   DebugLoc DL = I->getDebugLoc();
4655   MachineInstr *NewMI;
4656 
4657   for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
4658        insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
4659     NewMI = BuildMI(B, I, DL, get(insn));
4660     LLVM_DEBUG(dbgs() << "\n"
4661                       << getName(NewMI->getOpcode())
4662                       << "  Class: " << NewMI->getDesc().getSchedClass());
4663     NewMI->eraseFromParent();
4664   }
4665   /* --- The code above is used to generate complete set of Hexagon Insn --- */
4666 }
4667 
4668 // Inverts the predication logic:
4669 //   P -> NotP
4670 //   NotP -> P
4671 bool HexagonInstrInfo::reversePredSense(MachineInstr &MI) const {
4672   LLVM_DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
4673   MI.setDesc(get(getInvertedPredicatedOpcode(MI.getOpcode())));
4674   return true;
4675 }
4676 
4677 // Reverse the branch prediction.
4678 unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
4679   int PredRevOpcode = -1;
4680   if (isPredictedTaken(Opcode))
4681     PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
4682   else
4683     PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
4684   assert(PredRevOpcode > 0);
4685   return PredRevOpcode;
4686 }
4687 
4688 // TODO: Add more rigorous validation.
4689 bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond)
4690       const {
4691   return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
4692 }
4693 
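// Record in the bundle header that memory shuffling is disabled for this
// bundle, by setting memShufDisabledMask in the header's immediate operand.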
4694 void HexagonInstrInfo::
4695 setBundleNoShuf(MachineBasicBlock::instr_iterator MIB) const {
4696   assert(MIB->isBundle());
4697   MachineOperand &Operand = MIB->getOperand(0);
4698   if (Operand.isImm())
4699     Operand.setImm(Operand.getImm() | memShufDisabledMask);
4700   else
4701     MIB->addOperand(MachineOperand::CreateImm(memShufDisabledMask));
4702 }
4703 
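// Return true if the bundle header's immediate operand has the
// memShufDisabledMask bit set.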
4704 bool HexagonInstrInfo::getBundleNoShuf(const MachineInstr &MIB) const {
4705   assert(MIB.isBundle());
4706   const MachineOperand &Operand = MIB.getOperand(0);
4707   return (Operand.isImm() && (Operand.getImm() & memShufDisabledMask) != 0);
4708 }
4709 
4710 // Addressing mode relations.
4711 short HexagonInstrInfo::changeAddrMode_abs_io(short Opc) const {
4712   return Opc >= 0 ? Hexagon::changeAddrMode_abs_io(Opc) : Opc;
4713 }
4714 
4715 short HexagonInstrInfo::changeAddrMode_io_abs(short Opc) const {
4716   return Opc >= 0 ? Hexagon::changeAddrMode_io_abs(Opc) : Opc;
4717 }
4718 
4719 short HexagonInstrInfo::changeAddrMode_io_pi(short Opc) const {
4720   return Opc >= 0 ? Hexagon::changeAddrMode_io_pi(Opc) : Opc;
4721 }
4722 
4723 short HexagonInstrInfo::changeAddrMode_io_rr(short Opc) const {
4724   return Opc >= 0 ? Hexagon::changeAddrMode_io_rr(Opc) : Opc;
4725 }
4726 
4727 short HexagonInstrInfo::changeAddrMode_pi_io(short Opc) const {
4728   return Opc >= 0 ? Hexagon::changeAddrMode_pi_io(Opc) : Opc;
4729 }
4730 
4731 short HexagonInstrInfo::changeAddrMode_rr_io(short Opc) const {
4732   return Opc >= 0 ? Hexagon::changeAddrMode_rr_io(Opc) : Opc;
4733 }
4734 
4735 short HexagonInstrInfo::changeAddrMode_rr_ur(short Opc) const {
4736   return Opc >= 0 ? Hexagon::changeAddrMode_rr_ur(Opc) : Opc;
4737 }
4738 
4739 short HexagonInstrInfo::changeAddrMode_ur_rr(short Opc) const {
4740   return Opc >= 0 ? Hexagon::changeAddrMode_ur_rr(Opc) : Opc;
4741 }
4742 
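// Return an MCInst bundle containing a single A2_nop.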
4743 MCInst HexagonInstrInfo::getNop() const {
4744   static const MCInst Nop = MCInstBuilder(Hexagon::A2_nop);
4745 
4746   return MCInstBuilder(Hexagon::BUNDLE)
4747     .addImm(0)
4748     .addInst(&Nop);
4749 }
4750