//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

static cl::opt<bool>
PreferNoCSEL("prefer-no-csel", cl::Hidden,
             cl::desc("Prefer predicated Move to CSEL"),
             cl::init(false));

Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the no-op instruction to use: tHINT #0, the Thumb-2 NOP.
MCInst Thumb2InstrInfo::getNop() const {
  return MCInstBuilder(ARM::tHINT).addImm(0).addImm(ARMCC::AL).addReg(0);
}

unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
  return 0;
}

void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  Register PredReg;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
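    // The t2IT mask operand encodes the block length: the least-significant
    // set bit terminates the block, and the bits above it hold the then/else
    // condition of each trailing instruction. Walk backwards from the removed
    // tail: if the t2IT immediately preceded it (Count == 4), the tail was
    // the only instruction in the block and the IT can be erased entirely;
    // otherwise shorten the mask so the block ends one instruction earlier.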
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach here if the branch folding pass is run before
    // the IT block formation pass.
  }
}

bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  while (MBBI->isDebugInstr()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

  Register PredReg;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

MachineInstr *
Thumb2InstrInfo::optimizeSelect(MachineInstr &MI,
                                SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                                bool PreferFalse) const {
  // Try to use the base optimizeSelect, which uses canFoldIntoMOVCC to fold
  // the MOVCC into another instruction. If that fails, fall back to using a
  // CSEL on 8.1-M.
  MachineInstr *RV = ARMBaseInstrInfo::optimizeSelect(MI, SeenMIs, PreferFalse);
  if (!RV && getSubtarget().hasV8_1MMainlineOps() && !PreferNoCSEL) {
    Register DestReg = MI.getOperand(0).getReg();

    if (!DestReg.isVirtual())
      return nullptr;

    MachineInstrBuilder NewMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                        get(ARM::t2CSEL), DestReg)
                                    .add(MI.getOperand(2))
                                    .add(MI.getOperand(1))
                                    .add(MI.getOperand(3));
    SeenMIs.insert(NewMI);
    return NewMI;
  }
  return RV;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, MCRegister DestReg,
                                  MCRegister SrcReg, bool KillSrc) const {
  // Handle SPR, DPR, and QPR copies.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    Register SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its source registers to be in rGPR. Not a problem
    // for gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     Register DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    if (Register::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

MachineInstr *Thumb2InstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI,
                                                      unsigned OpIdx1,
                                                      unsigned OpIdx2) const {
  switch (MI.getOpcode()) {
  case ARM::MVE_VMAXNMAf16:
  case ARM::MVE_VMAXNMAf32:
  case ARM::MVE_VMINNMAf16:
  case ARM::MVE_VMINNMAf32:
    // Don't allow predicated instructions to be commuted.
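    // Commuting swaps which source register is tied to the destination. That
    // is harmless for an unpredicated max/min of absolute values, but inside
    // a VPT block the inactive lanes keep the old value of the tied (first)
    // operand, so the choice of tied operand becomes observable.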
    if (getVPTInstrPredicate(MI) != ARMVCC::None)
      return nullptr;
  }
  return ARMBaseInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, Register DestReg,
                                  Register BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, Register PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
          .addImm(NumBytes)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
          .addReg(DestReg)
          .addImm(NumBytes >> 16)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP, but we do not know anything
        // about BaseReg. t2ADDrr is an invalid instruction if SP is used as
        // the second argument, but is fine if SP is the first argument. To
        // be sure we do not generate an invalid encoding, put BaseReg first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    assert((DestReg != ARM::SP || BaseReg == ARM::SP) &&
           "Writing to SP from another register.");

    // Try to use a T1 instruction, as it is smaller.
    if ((DestReg == ARM::SP) && (ThisVal < ((1 << 7) - 1) * 4)) {
      assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
      Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
          .addReg(BaseReg)
          .addImm(ThisVal / 4)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      break;
    }
    bool HasCCOut = true;
    int ImmIsT2SO = ARM_AM::getT2SOImmVal(ThisVal);
    bool ToSP = DestReg == ARM::SP;
    unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
    unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
    unsigned t2SUBi12 = ToSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12;
    unsigned t2ADDi12 = ToSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
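    // Pick the cheapest encoding that still makes progress: a T2 ADD/SUB
    // with a modified immediate if the whole value fits, else a T3
    // ADDW/SUBW with a plain 12-bit immediate, and otherwise peel off one
    // modified-immediate chunk per loop iteration.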
    Opc = isSub ? t2SUB : t2ADD;
    // Prefer T2: sub rd, rn, so_imm | sub sp, sp, so_imm
    if (ImmIsT2SO != -1) {
      NumBytes = 0;
    } else if (ThisVal < 4096) {
      // Prefer T3 if we can do it in a single instruction: subw rd, rn,
      // imm12 | subw sp, sp, imm12
      Opc = isSub ? t2SUBi12 : t2ADDi12;
      HasCCOut = false;
      NumBytes = 0;
    } else {
      // Use one T2 instruction to reduce NumBytes.
      // FIXME: Move this to ARMAddressingModes.h?
      unsigned RotAmt = countLeadingZeros(ThisVal);
      ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
      NumBytes &= ~ThisVal;
      assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
             "Bit extraction didn't work?");
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    BaseReg = DestReg;
  }
}

static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12: return ARM::t2LDRi8;
  case ARM::t2LDRHi12: return ARM::t2LDRHi8;
  case ARM::t2LDRBi12: return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12: return ARM::t2STRi8;
  case ARM::t2STRBi12: return ARM::t2STRBi8;
  case ARM::t2STRHi12: return ARM::t2STRHi8;
  case ARM::t2PLDi12: return ARM::t2PLDi8;
  case ARM::t2PLDWi12: return ARM::t2PLDWi8;
  case ARM::t2PLIi12: return ARM::t2PLIi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
  case ARM::t2PLDWi8:
  case ARM::t2PLIi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8: return ARM::t2LDRi12;
  case ARM::t2LDRHi8: return ARM::t2LDRHi12;
  case ARM::t2LDRBi8: return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8: return ARM::t2STRi12;
  case ARM::t2STRBi8: return ARM::t2STRBi12;
  case ARM::t2STRHi8: return ARM::t2STRHi12;
  case ARM::t2PLDi8: return ARM::t2PLDi12;
  case ARM::t2PLDWi8: return ARM::t2PLDWi12;
  case ARM::t2PLIi8: return ARM::t2PLIi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2PLDWi12:
  case ARM::t2PLIi12:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs: return ARM::t2LDRi12;
  case ARM::t2LDRHs: return ARM::t2LDRHi12;
  case ARM::t2LDRBs: return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs: return ARM::t2STRi12;
  case ARM::t2STRBs: return ARM::t2STRBi12;
  case ARM::t2STRHs: return ARM::t2STRHi12;
  case ARM::t2PLDs: return ARM::t2PLDi12;
  case ARM::t2PLDWs: return ARM::t2PLDWi12;
  case ARM::t2PLIs: return ARM::t2PLIi12;
  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2PLDWi12:
  case ARM::t2PLIi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
  case ARM::t2PLDWi8:
  case ARM::t2PLIi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               Register FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII,
                               const TargetRegisterInfo *TRI) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(Desc, FrameRegIdx, TRI, MF);

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

  const bool IsSP = Opcode == ARM::t2ADDspImm12 || Opcode == ARM::t2ADDspImm;
  if (IsSP || Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    Register PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    bool HasCCOut = (Opcode != ARM::t2ADDspImm12 && Opcode != ARM::t2ADDri12);

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(IsSP ? TII.get(ARM::t2SUBspImm) : TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(IsSP ? TII.get(ARM::t2ADDspImm) : TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12.
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? IsSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12
                              : IsSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
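    // A Thumb-2 modified immediate can encode an 8-bit value rotated to an
    // arbitrary position, so peel off the eight bits below the most
    // significant set bit: countLeadingZeros locates that bit, and rotating
    // the 0xff000000 window right by the same amount masks the byte under it.
    // For example, Offset = 0x12340 would contribute 0x12200 here, leaving
    // 0x140 to be materialized by the caller.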
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
      // i8 supports only negative offsets and i12 supports only positive
      // ones, so convert the opcode to the appropriate instruction based on
      // the sign of Offset.
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
               AddrMode == ARMII::AddrModeT2_i7s2 ||
               AddrMode == ARMII::AddrModeT2_i7) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      unsigned OffsetMask;
      switch (AddrMode) {
      case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
      case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
      default: NumBits = 7; OffsetMask = 0x0; break;
      }
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
      (void)OffsetMask; // Squash the unused-variable warning in NDEBUG builds.
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      NumBits = 8 + 2;
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8; // 8 bits scaled by 4
      Scale = 4;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold the address computation.
    // Common case: small offset, fits into instruction. We need to make sure
    // the register class is correct too, for instructions like the MVE
    // VLDRH.32, which only accepts low tGPR registers.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale &&
        (Register::isVirtualRegister(FrameReg) ||
         RegClass->contains(FrameReg))) {
      if (Register::isVirtualRegister(FrameReg)) {
        // Make sure the register class for the virtual register is correct.
        MachineRegisterInfo *MRI = &MF.getRegInfo();
        if (!MRI->constrainRegClass(FrameReg, RegClass))
          llvm_unreachable("Unable to constrain virtual register class.");
      }

      // Replace the FrameIndex with fp/sp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, the offset doesn't fit. Pull in what we can to simplify.
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0 && (Register::isVirtualRegister(FrameReg) ||
                         RegClass->contains(FrameReg));
}

ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           Register &PredReg) {
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}

int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();

  if (!MCID.OpInfo)
    return -1;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (ARM::isVpred(MCID.OpInfo[i].OperandType))
      return i;

  return -1;
}

ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
                                            Register &PredReg) {
  int PIdx = findFirstVPTPredOperandIdx(MI);
  if (PIdx == -1) {
    PredReg = 0;
    return ARMVCC::None;
  }

  PredReg = MI.getOperand(PIdx+1).getReg();
  return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
}

void llvm::recomputeVPTBlockMask(MachineInstr &Instr) {
  assert(isVPTOpcode(Instr.getOpcode()) && "Not a VPST or VPT Instruction!");

  MachineOperand &MaskOp = Instr.getOperand(0);
  assert(MaskOp.isImm() && "Operand 0 is not the block mask of the VPT/VPST?!");

  MachineBasicBlock::iterator Iter = ++Instr.getIterator(),
                              End = Instr.getParent()->end();

  // Verify that the instruction after the VPT/VPST is predicated (it should
  // be), and skip it.
  assert(
      getVPTInstrPredicate(*Iter) == ARMVCC::Then &&
      "VPT/VPST should be followed by an instruction with a 'then' predicate!");
  ++Iter;

  // Iterate over the predicated instructions, updating the BlockMask as we go.
  ARM::PredBlockMask BlockMask = ARM::PredBlockMask::T;
  while (Iter != End) {
    ARMVCC::VPTCodes Pred = getVPTInstrPredicate(*Iter);
    if (Pred == ARMVCC::None)
      break;
    BlockMask = expandPredBlockMask(BlockMask, Pred);
    ++Iter;
  }

  // Rewrite the BlockMask.
  MaskOp.setImm((int64_t)(BlockMask));
}