xref: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp (revision 8bcb0991864975618c09697b1aca10683346d9f0)
//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the noop instruction to use for a noop: a predicated (AL) Thumb-2
/// NOP, encoded as "hint #0".
void Thumb2InstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(ARM::tHINT);
  NopInst.addOperand(MCOperand::createImm(0));
  NopInst.addOperand(MCOperand::createImm(ARMCC::AL));
  NopInst.addOperand(MCOperand::createReg(0));
}

unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
  return 0;
}

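// Replace the tail of a basic block with an unconditional branch to NewDest.
// Unlike the generic TargetInstrInfo version, this also has to shorten or
// remove an IT block that covered the old, predicated tail, since the new
// branch is not predicated.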
void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
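      // When the IT instruction is found: if no predicated instructions
      // remain in front of the old tail (Count is still 4), the IT is now
      // useless and is erased.  Otherwise its mask is rewritten so the block
      // ends just before the removed tail: the bit at position Count becomes
      // the new terminating bit and the lower bits are cleared.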
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach this point if branch folding runs before the
    // IT block formation pass.
  }
}

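// It is only legal to split a block at a point that is not inside an IT
// block, i.e. where the next real instruction is unpredicated.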
bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  while (MBBI->isDebugInstr()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

  unsigned PredReg = 0;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, unsigned DestReg,
                                  unsigned SrcReg, bool KillSrc) const {
  // Non-GPR copies (SPR, DPR, QPR, ...) are handled by the ARM base class;
  // GPR-to-GPR copies use a predicated tMOVr.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

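// Spill SrcReg to the stack slot FI.  Single GPRs are stored with t2STRi12,
// GPR pairs with t2STRDi8; everything else is delegated to the ARM base
// implementation.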
void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its register operands to be in rGPR. Not a problem
    // for gsub_0, but gsub_1 needs an extra constraint (it could otherwise
    // be sp).
    if (Register::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

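// Reload DestReg from the stack slot FI.  Single GPRs are loaded with
// t2LDRi12, GPR pairs with t2LDRDi8; everything else is delegated to the ARM
// base implementation.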
void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    if (Register::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

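// Load the stack protector guard value.  Position-independent code forms the
// address pc-relative with t2MOV_ga_pcrel; otherwise the address is
// materialized with t2MOVi32imm (movw/movt).  The guard itself is then loaded
// with t2LDRi12.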
void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

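// Emit a sequence that leaves BaseReg + NumBytes in DestReg, using as few
// Thumb-2 instructions as possible: a plain move for a zero offset, a
// movw/movt-materialized constant plus one add/sub when that is profitable,
// or a chain of add/sub instructions whose immediates are T2-encodable
// chunks of the offset.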
void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, unsigned DestReg,
                                  unsigned BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, unsigned PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
      .addReg(BaseReg, RegState::Kill)
      .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
        .addImm(NumBytes)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
        .addReg(DestReg)
        .addImm(NumBytes >> 16)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP, but we do not know anything
        // about BaseReg. t2ADDrr is an invalid instruction if SP is used as
        // the second argument, but it is fine if SP is the first argument.
        // To be sure we do not generate an invalid encoding, put BaseReg
        // first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    bool HasCCOut = true;
    if (BaseReg == ARM::SP) {
      // sub sp, sp, #imm7
      if (DestReg == ARM::SP && (ThisVal < ((1 << 7)-1) * 4)) {
        assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
        Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
        BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
            .addReg(BaseReg)
            .addImm(ThisVal / 4)
            .setMIFlags(MIFlags)
            .add(predOps(ARMCC::AL));
        NumBytes = 0;
        continue;
      }

      // sub rd, sp, so_imm
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
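        // Peel off the 8 most significant bits of the value, aligned at its
        // leading one, so that the chunk is a valid T2 modified immediate.
        // For example (illustrative value only), NumBytes = 0x12340 peels
        // off ThisVal = 0x12200 here and leaves 0x140 for the next iteration
        // of the loop.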
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    } else {
      assert(DestReg != ARM::SP && BaseReg != ARM::SP);
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else if (ThisVal < 4096) {
        Opc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
        HasCCOut = false;
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    BaseReg = DestReg;
  }
}

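// The next three helpers map between equivalent Thumb-2 memory opcodes:
// negativeOffsetOpcode yields the 8-bit negative-offset (i8) form,
// positiveOffsetOpcode the 12-bit positive-offset (i12) form, and
// immediateOffsetOpcode turns a register-offset (s) form into the i12 form.
// Each returns 0 when there is no equivalent opcode.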
static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12:   return ARM::t2LDRi8;
  case ARM::t2LDRHi12:  return ARM::t2LDRHi8;
  case ARM::t2LDRBi12:  return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12:   return ARM::t2STRi8;
  case ARM::t2STRBi12:  return ARM::t2STRBi8;
  case ARM::t2STRHi12:  return ARM::t2STRHi8;
  case ARM::t2PLDi12:   return ARM::t2PLDi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    break;
  }

  return 0;
}

static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8:   return ARM::t2LDRi12;
  case ARM::t2LDRHi8:  return ARM::t2LDRHi12;
  case ARM::t2LDRBi8:  return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8:   return ARM::t2STRi12;
  case ARM::t2STRBi8:  return ARM::t2STRBi12;
  case ARM::t2STRHi8:  return ARM::t2STRHi12;
  case ARM::t2PLDi8:   return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
    return opcode;

  default:
    break;
  }

  return 0;
}

static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs:   return ARM::t2LDRi12;
  case ARM::t2LDRHs:  return ARM::t2LDRHi12;
  case ARM::t2LDRBs:  return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs:   return ARM::t2STRi12;
  case ARM::t2STRBs:  return ARM::t2STRBi12;
  case ARM::t2STRHs:  return ARM::t2STRHi12;
  case ARM::t2PLDs:   return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    break;
  }

  return 0;
}

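// Rewrite the frame-index operand of MI at FrameRegIdx to use FrameReg plus
// Offset.  As much of the offset as the addressing mode allows is folded into
// the instruction; the unencodable remainder is left in Offset, and the
// function returns true only when the offset was folded completely.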
bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               unsigned FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII,
                               const TargetRegisterInfo *TRI) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(Desc, FrameRegIdx, TRI, MF);

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

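  // Frame-index materializations (t2ADDri / t2ADDri12) are handled first.
  // They can degenerate into a plain register move, be re-encoded as an
  // add/sub with a modified or 12-bit immediate, or fold only the 8 most
  // significant bits of the offset, leaving the rest in Offset for the
  // caller.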
  if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    unsigned PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    bool HasCCOut = Opcode != ARM::t2ADDri12;

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12.
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
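    // Memory references: pick the opcode variant and immediate-field width
    // that match the addressing mode, then try to fold the (possibly scaled)
    // offset into the immediate field.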
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
      // The i8 form encodes only negative offsets and the i12 form only
      // positive ones, so convert the opcode based on the sign of Offset.
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
               AddrMode == ARMII::AddrModeT2_i7s2 ||
               AddrMode == ARMII::AddrModeT2_i7) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      unsigned OffsetMask;
      switch (AddrMode) {
      case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
      case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
      default:                     NumBits = 7; OffsetMask = 0x0; break;
      }
      // The MCInst operand expects an already-scaled value.
      Scale = 1;
      assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
      (void)OffsetMask; // Squash the unused-variable warning in NDEBUG builds.
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8 + 2;
      // The MCInst operand expects an already-scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8; // 8 bits scaled by 4
      Scale = 4;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold address computation
    // Common case: small offset, fits into instruction. We need to make sure
    // the register class is correct too, for instructions like the MVE
    // VLDRH.32, which only accepts low tGPR registers.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale &&
        (Register::isVirtualRegister(FrameReg) ||
         RegClass->contains(FrameReg))) {
      if (Register::isVirtualRegister(FrameReg)) {
        // Make sure the register class for the virtual register is correct
        MachineRegisterInfo *MRI = &MF.getRegInfo();
        if (!MRI->constrainRegClass(FrameReg, RegClass))
          llvm_unreachable("Unable to constrain virtual register class.");
      }

      // Replace the FrameIndex with fp/sp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, the offset doesn't fit; fold in what we can to simplify the
    // rest.
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0 && (Register::isVirtualRegister(FrameReg) ||
                         RegClass->contains(FrameReg));
}

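// Identical to getInstrPredicate, except that conditional branches (tBcc /
// t2Bcc) report ARMCC::AL: they are "predicated" by their condition code but
// are not inside IT blocks.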
ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           unsigned &PredReg) {
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}

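// Return the index of the first MVE vector-predicate (vpred) operand of MI,
// or -1 if it has none.  MVE predicated instructions carry the vpred code
// immediate followed by the predicate register, which getVPTInstrPredicate
// below reads as a pair.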
int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();

  if (!MCID.OpInfo)
    return -1;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (ARM::isVpred(MCID.OpInfo[i].OperandType))
      return i;

  return -1;
}

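// Return the VPT predicate code (and, through PredReg, the predicate
// register) under which MI executes, or ARMVCC::None if MI is not
// MVE-predicated.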
ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
                                            unsigned &PredReg) {
  int PIdx = findFirstVPTPredOperandIdx(MI);
  if (PIdx == -1) {
    PredReg = 0;
    return ARMVCC::None;
  }

  PredReg = MI.getOperand(PIdx+1).getReg();
  return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
}
735